/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};
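
/*
 * Example (illustrative only, not part of this header): an rcu_head is
 * normally embedded in the RCU-protected structure itself, so that the
 * callback can recover the enclosing structure with container_of().
 * The "struct foo" name here is hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 */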

/* Exported common interfaces */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_scheduler_active;
extern void rcu_scheduler_starting(void);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_bh_lock_map;
# define rcu_read_acquire_bh() \
		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_acquire_sched() \
		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

/**
 * rcu_read_lock_held - might we be in an RCU read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU read-side critical
 * section unless it can prove otherwise.
 */
static inline int rcu_read_lock_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_lock_map);
	return 1;
}

/**
 * rcu_read_lock_bh_held - might we be in an RCU-bh read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU-bh read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU-bh read-side critical
 * section unless it can prove otherwise.
 */
static inline int rcu_read_lock_bh_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_bh_lock_map);
	return 1;
}

/**
 * rcu_read_lock_sched_held - might we be in an RCU-sched read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling of
 * preemption (including disabling irqs) counts as an RCU-sched read-side
 * critical section.
 */
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_read_acquire()		do { } while (0)
# define rcu_read_release()		do { } while (0)
# define rcu_read_acquire_bh()		do { } while (0)
# define rcu_read_release_bh()		do { } while (0)
# define rcu_read_acquire_sched()	do { } while (0)
# define rcu_read_release_sched()	do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || !rcu_scheduler_active;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_dereference_check - rcu_dereference with debug checking
 * @p: the pointer to fetch and protect for later dereferencing
 * @c: the condition under which the dereference is legal
 *
 * Do an rcu_dereference(), but check that the context is correct.
 * For example, rcu_dereference_check(gp, rcu_read_lock_held()) checks
 * that the rcu_dereference_check() executes within an RCU read-side
 * critical section.  It is also possible to check for locks being held,
 * for example, by using lockdep_is_held().
 */
#define rcu_dereference_check(p, c) \
	({ \
		if (debug_locks && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})
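
/*
 * Example (illustrative only): a pointer that may legitimately be
 * dereferenced either under rcu_read_lock() or with a hypothetical
 * gp_lock held can combine both conditions, so that lockdep complains
 * only when neither holds:
 *
 *	p = rcu_dereference_check(gp, rcu_read_lock_held() ||
 *				      lockdep_is_held(&gp_lock));
 */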

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_read_acquire();
}
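
/*
 * Example (illustrative only): a typical reader; "gp" and "struct foo"
 * are hypothetical names:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */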

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * other's way, as long as they do so.
 */

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	__rcu_read_unlock();
}

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is equivalent to rcu_read_lock(), but is to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process-context RCU read-side critical section must be protected
 * by disabling softirqs.  Read-side critical sections in interrupt
 * context can use just rcu_read_lock().
 */
static inline void rcu_read_lock_bh(void)
{
	__rcu_read_lock_bh();
	__acquire(RCU_BH);
	rcu_read_acquire_bh();
}

/**
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_read_release_bh();
	__release(RCU_BH);
	__rcu_read_unlock_bh();
}
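
/*
 * Example (illustrative only): a process-context reader paired with
 * call_rcu_bh() updates; names are hypothetical:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock_bh();
 */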

/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write side to ensure proper synchronization.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_read_acquire_sched();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/**
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_read_release_sched();
	__release(RCU_SCHED);
	preempt_enable();
}
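
/*
 * Example (illustrative only): an RCU-sched reader and the matching
 * write-side wait; "gp" and "old" are hypothetical names:
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock_sched();
 *
 * and on the update side, after unlinking the old structure:
 *
 *	synchronize_sched();
 *	kfree(old);
 */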

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * rcu_dereference_raw - fetch an RCU-protected pointer
 * @p: the pointer to fetch
 *
 * The caller must be within some flavor of RCU read-side critical
 * section, or must be otherwise preventing the pointer from changing,
 * for example, by holding an appropriate lock.  This pointer may later
 * be safely dereferenced.  It is the caller's responsibility to have
 * done the right thing, as this primitive does no checking of any kind.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference_raw(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
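
/*
 * Example (illustrative only): rcu_dereference_raw() used where an
 * ordinary lock, not RCU, prevents the pointer from changing, so that
 * none of the rcu_read_lock_*held() conditions apply; "gp_lock" and
 * "gp" are hypothetical names:
 *
 *	spin_lock(&gp_lock);
 *	p = rcu_dereference_raw(gp);
 *	do_something_with(p->a);
 *	spin_unlock(&gp_lock);
 */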

/**
 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference(p) \
	rcu_dereference_check(p, rcu_read_lock_held())

/**
 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) \
	rcu_dereference_check(p, rcu_read_lock_bh_held())

/**
 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) \
	rcu_dereference_check(p, rcu_read_lock_sched_held())

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a new structure
 * @p: pointer to assign to
 * @v: value to assign (publicize)
 *
 * Assigns the specified value to the specified RCU-protected pointer,
 * so that it may safely be dereferenced by RCU read-side critical
 * sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
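
/*
 * Example (illustrative only): publishing a newly initialized structure;
 * "gp" and "struct foo" are hypothetical names:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->a = 1;
 *	rcu_assign_pointer(gp, p);
 *
 * Readers that pick up the new value of gp via rcu_dereference() are
 * then guaranteed to see the initialized ->a.
 */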

/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);
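
/*
 * Sketch (illustrative only) of how a synchronize-style primitive can be
 * built on rcu_synchronize: queue a callback that completes the
 * completion, then sleep until the grace period ends:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 */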

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));
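
/*
 * Example (illustrative only): replacing an RCU-protected structure and
 * deferring the free of the old version; names are hypothetical and the
 * update side is assumed to hold the appropriate lock:
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	call_rcu(&old->rcu, foo_reclaim);
 */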

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
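
/*
 * Example (illustrative only): the bh-flavored update side mirrors
 * call_rcu(), reusing the hypothetical foo_reclaim() callback above:
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	call_rcu_bh(&old->rcu, foo_reclaim);
 */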

#endif /* __LINUX_RCUPDATE_H */