/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/rcuref.h>
#include <linux/cpu.h>

/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
        { .cur = -300, .completed = -300 };
struct rcu_ctrlblk rcu_bh_ctrlblk =
        { .cur = -300, .completed = -300 };

/* Bookkeeping of the progress of the grace period */
struct rcu_state {
        spinlock_t      lock;    /* Guard this struct and writes to rcu_ctrlblk */
        cpumask_t       cpumask; /* CPUs that need to switch in order */
                                 /* for current batch to proceed.     */
};

static struct rcu_state rcu_state ____cacheline_internodealigned_in_smp =
        { .lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
static struct rcu_state rcu_bh_state ____cacheline_internodealigned_in_smp =
        { .lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int maxbatch = 10000;

#ifndef __HAVE_ARCH_CMPXCHG
/*
 * We use an array of spinlocks for the rcurefs -- similar to the ones used
 * in the sparc 32-bit atomic_t implementation, and a similar hash function,
 * for our refcounting needs.
 * This can't help multiprocessors which do not have cmpxchg :(
 */

spinlock_t __rcuref_hash[RCUREF_HASH_SIZE] = {
        [0 ... (RCUREF_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};
#endif
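
/*
 * Illustrative sketch only -- rcuref_hash_lock() below is a hypothetical
 * helper, not part of this file.  On machines without cmpxchg, an atomic
 * read-modify-write on an rcuref can be emulated by hashing the counter's
 * address into __rcuref_hash and holding that spinlock around the update:
 *
 *      static inline spinlock_t *rcuref_hash_lock(atomic_t *ref)
 *      {
 *              return &__rcuref_hash[((unsigned long)ref >> 4) %
 *                                              RCUREF_HASH_SIZE];
 *      }
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(rcuref_hash_lock(ref), flags);
 *      atomic_set(ref, atomic_read(ref) + 1);
 *      spin_unlock_irqrestore(rcuref_hash_lock(ref), flags);
 *
 * Any function mapping an address onto [0, RCUREF_HASH_SIZE) will do; the
 * actual hash used by <linux/rcuref.h> may differ.
 */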

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;

        if (unlikely(++rdp->count > 10000))
                set_need_resched();

        local_irq_restore(flags);
}
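
/*
 * Illustrative usage sketch -- struct foo, foo_rcu_free() and foo_release()
 * are hypothetical, not part of this file.  The rcu_head is embedded in the
 * protected structure, and the callback typically just frees it:
 *
 *      struct foo {
 *              struct list_head list;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_rcu_free(struct rcu_head *head)
 *      {
 *              kfree(container_of(head, struct foo, rcu));
 *      }
 *
 *      static void foo_release(struct foo *fp)
 *      {
 *              list_del_rcu(&fp->list);
 *              call_rcu(&fp->rcu, foo_rcu_free);
 *      }
 *
 * The callback runs from softirq context once all pre-existing
 * rcu_read_lock()/rcu_read_unlock() sections have completed, so it must
 * not block.
 */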

static atomic_t rcu_barrier_cpu_count;
static struct semaphore rcu_barrier_sema;
static struct completion rcu_barrier_completion;

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context.  These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_bh_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
        rdp->count++;
        /*
         * Should we directly call rcu_do_batch() here ?
         * if (unlikely(rdp->count > 10000))
         *      rcu_do_batch(rdp);
         */
        local_irq_restore(flags);
}
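
/*
 * Illustrative sketch only: readers that run mostly in softirq context
 * bracket their accesses with rcu_read_lock_bh()/rcu_read_unlock_bh(),
 * and the updater queues the free exactly as in the call_rcu() example
 * above, just through the _bh variant:
 *
 *      list_del_rcu(&fp->list);
 *      call_rcu_bh(&fp->rcu, foo_rcu_free);
 */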

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}

static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
        int cpu = smp_processor_id();
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_head *head;

        head = &rdp->barrier;
        atomic_inc(&rcu_barrier_cpu_count);
        call_rcu(head, rcu_barrier_callback);
}

/**
 * rcu_barrier - Wait until all in-flight RCU callbacks have completed.
 */
void rcu_barrier(void)
{
        BUG_ON(in_interrupt());
        /* Take cpucontrol semaphore to protect against CPU hotplug */
        down(&rcu_barrier_sema);
        init_completion(&rcu_barrier_completion);
        atomic_set(&rcu_barrier_cpu_count, 0);
        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
        wait_for_completion(&rcu_barrier_completion);
        up(&rcu_barrier_sema);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
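
/*
 * Illustrative usage sketch -- foo_exit(), unregister_foo() and foo_cache
 * are hypothetical.  A module that queues callbacks with call_rcu() must
 * wait for all of them to run before it is unloaded, otherwise a pending
 * callback could fire after the module text is gone:
 *
 *      static void __exit foo_exit(void)
 *      {
 *              unregister_foo();
 *              rcu_barrier();
 *              kmem_cache_destroy(foo_cache);
 *      }
 *
 * The rcu_barrier() call ensures that every callback queued before it has
 * finished before the cache backing those objects is destroyed.
 */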

/*
 * Invoke the completed RCU callbacks.  They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = rdp->donelist = list->next;
                list->func(list);
                list = next;
                rdp->count--;
                if (++count >= maxbatch)
                        break;
        }
        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch.  The start is not broadcast to all
 *   cpus; they must pick it up by comparing rcp->cur with rdp->quiescbatch.
 *   All cpus are recorded in the rcu_state.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running.  The
 *   following calls check whether there was a quiescent state since the
 *   beginning of the grace period.  If so, they update rcu_state.cpumask.
 *   If the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch(0) to start the next
 *   grace period (if necessary).
 */
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_state.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
                                int next_pending)
{
        if (next_pending)
                rcp->next_pending = 1;

        if (rcp->next_pending &&
                        rcp->completed == rcp->cur) {
                rcp->next_pending = 0;
                /*
                 * next_pending == 0 must be visible in
                 * __rcu_process_callbacks() before it can see new value of cur.
                 */
                smp_wmb();
                rcp->cur++;

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
                 * barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rsp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu.  Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
{
        cpu_clear(cpu, rsp->cpumask);
        if (cpus_empty(rsp->cpumask)) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp, rsp, 0);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch).  If so, and if it has not already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                        struct rcu_state *rsp, struct rcu_data *rdp)
{
        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock(&rsp->lock);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup.  Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp, rsp);

        spin_unlock(&rsp->lock);
}


#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu; do not use elsewhere without reviewing
 * the locking requirements: the list it pulls from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail)
{
        local_irq_disable();
        *this_rdp->nxttail = list;
        if (list)
                this_rdp->nxttail = tail;
        local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
        struct rcu_ctrlblk *rcp, struct rcu_state *rsp, struct rcu_data *rdp)
{
        /* if the cpu going offline owns the grace period
         * we can block indefinitely waiting for it, so flush
         * it here
         */
        spin_lock_bh(&rsp->lock);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp, rsp);
        spin_unlock_bh(&rsp->lock);
        rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, &rcu_state,
                          &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, &rcu_bh_state,
                          &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
        tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                        struct rcu_state *rsp, struct rcu_data *rdp)
{
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
                *rdp->donetail = rdp->curlist;
                rdp->donetail = rdp->curtail;
                rdp->curlist = NULL;
                rdp->curtail = &rdp->curlist;
        }

        local_irq_disable();
        if (rdp->nxtlist && !rdp->curlist) {
                rdp->curlist = rdp->nxtlist;
                rdp->curtail = rdp->nxttail;
                rdp->nxtlist = NULL;
                rdp->nxttail = &rdp->nxtlist;
                local_irq_enable();

                /*
                 * start the next batch of callbacks
                 */

                /* determine batch number */
                rdp->batch = rcp->cur + 1;
                /* see the comment and corresponding wmb() in
                 * the rcu_start_batch()
                 */
                smp_rmb();

                if (!rcp->next_pending) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rsp->lock);
                        rcu_start_batch(rcp, rsp, 1);
                        spin_unlock(&rsp->lock);
                }
        } else {
                local_irq_enable();
        }
        rcu_check_quiescent_state(rcp, rsp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
        __rcu_process_callbacks(&rcu_ctrlblk, &rcu_state,
                                &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &rcu_bh_state,
                                &__get_cpu_var(rcu_bh_data));
}

void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                        hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);
        } else if (!in_softirq())
                rcu_bh_qsctr_inc(cpu);
        tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        memset(rdp, 0, sizeof(*rdp));
        rdp->curtail = &rdp->curlist;
        rdp->nxttail = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
}

static void __devinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}

static int __devinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or the jiffie timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly initialized due to the
 * choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
        sema_init(&rcu_barrier_sema, 1);
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished */
        call_rcu(&rcu.head, wakeme_after_rcu);

        /* Wait for it */
        wait_for_completion(&rcu.completion);
}
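
/*
 * Illustrative usage sketch -- foo_lock, struct foo and fp are hypothetical.
 * An updater that wants to free an element synchronously unlinks it, waits
 * for a grace period, and only then frees it; by the time synchronize_rcu()
 * returns, no reader can still hold a reference to the old element:
 *
 *      spin_lock(&foo_lock);
 *      list_del_rcu(&fp->list);
 *      spin_unlock(&foo_lock);
 *      synchronize_rcu();
 *      kfree(fp);
 *
 * Unlike call_rcu(), this blocks and may therefore only be used from
 * process context.
 */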

/*
 * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
 */
void synchronize_kernel(void)
{
        synchronize_rcu();
}

module_param(maxbatch, int, 0);
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL(call_rcu);  /* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL_GPL(synchronize_rcu);
EXPORT_SYMBOL(synchronize_kernel);  /* WARNING: GPL-only in April 2006. */