/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

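/*
 * Usage sketch (not part of this header; "struct foo" and "foo_reclaim"
 * are hypothetical names): an RCU-protected structure embeds a
 * struct rcu_head so that call_rcu() can queue it for deferred
 * reclamation, and the callback recovers the enclosing structure
 * with container_of():
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 */
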
/* Exported common interfaces */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_scheduler_active;
extern void rcu_scheduler_starting(void);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

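/*
 * Sketch (hypothetical variable name): a statically allocated rcu_head
 * can be defined with RCU_HEAD(), or reinitialized with INIT_RCU_HEAD():
 *
 *	static RCU_HEAD(my_rcu_head);
 *	...
 *	INIT_RCU_HEAD(&my_rcu_head);
 */
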
#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_bh_lock_map;
# define rcu_read_acquire_bh() \
		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_acquire_sched() \
		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

/**
 * rcu_read_lock_held - might we be in RCU read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.
 */
static inline int rcu_read_lock_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_lock_map);
	return 1;
}

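/*
 * Sketch (hypothetical call site): code that must run inside an RCU
 * read-side critical section can assert as much, letting lockdep
 * complain when it can prove the requirement is violated:
 *
 *	WARN_ON_ONCE(!rcu_read_lock_held());
 */
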
/**
 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU-bh read-side critical section unless it can
 * prove otherwise.
 */
static inline int rcu_read_lock_bh_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_bh_lock_map);
	return 1;
}

/**
 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU-sched read-side critical section unless it
 * can prove otherwise.  Note that disabling of preemption (including
 * disabling irqs) counts as an RCU-sched read-side critical section.
 */
#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_read_acquire()		do { } while (0)
# define rcu_read_release()		do { } while (0)
# define rcu_read_acquire_bh()		do { } while (0)
# define rcu_read_release_bh()		do { } while (0)
# define rcu_read_acquire_sched()	do { } while (0)
# define rcu_read_release_sched()	do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || !rcu_scheduler_active;
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_dereference_check - rcu_dereference with debug checking
 *
 * Do an rcu_dereference(), but check that the context is correct.
 * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
 * ensure that the rcu_dereference_check() executes within an RCU
 * read-side critical section.  It is also possible to check for
 * locks being held, for example, by using lockdep_is_held().
 */
#define rcu_dereference_check(p, c) \
	({ \
		if (debug_locks && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

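/*
 * Illustrative sketch (hypothetical "gp" and "gp_lock"): a pointer that
 * may be dereferenced either under rcu_read_lock() or with a specific
 * lock held can combine both conditions:
 *
 *	p = rcu_dereference_check(gp, rcu_read_lock_held() ||
 *				      lockdep_is_held(&gp_lock));
 */
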
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_read_acquire();
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	__rcu_read_unlock();
}

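/*
 * Minimal reader-side sketch (hypothetical "gp" pointer and "p->a"
 * field; assumes the updater publishes gp with rcu_assign_pointer()):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */
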
/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
static inline void rcu_read_lock_bh(void)
{
	__rcu_read_lock_bh();
	__acquire(RCU_BH);
	rcu_read_acquire_bh();
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_read_release_bh();
	__release(RCU_BH);
	__rcu_read_unlock_bh();
}

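/*
 * Sketch of a process-context reader paired with call_rcu_bh() updates
 * (hypothetical names):
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p != NULL)
 *		handle_packet(p);
 *	rcu_read_unlock_bh();
 */
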
/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_read_acquire_sched();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_read_release_sched();
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

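/*
 * Sketch of an RCU-sched reader (hypothetical names); the matching
 * updater would use synchronize_sched() or call_rcu_sched():
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	do_something_with(p);
 *	rcu_read_unlock_sched();
 */
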
/**
 * rcu_dereference_raw - fetch an RCU-protected pointer
 *
 * The caller must be within some flavor of RCU read-side critical
 * section, or must be otherwise preventing the pointer from changing,
 * for example, by holding an appropriate lock.  This pointer may later
 * be safely dereferenced.  It is the caller's responsibility to have
 * done the right thing, as this primitive does no checking of any kind.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference_raw(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

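/*
 * Sketch (hypothetical names): rcu_dereference_raw() is for callers
 * that guarantee safety by some other means, for example by holding
 * the update-side lock so the pointer cannot change underneath them:
 *
 *	spin_lock(&gp_lock);
 *	p = rcu_dereference_raw(gp);
 *	...
 *	spin_unlock(&gp_lock);
 */
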
/**
 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference(p) \
	rcu_dereference_check(p, rcu_read_lock_held())

/**
 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) \
		rcu_dereference_check(p, rcu_read_lock_bh_held())

/**
 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) \
		rcu_dereference_check(p, rcu_read_lock_sched_held())

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})

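/*
 * Writer-side sketch (hypothetical "struct foo" and "gp"): fully
 * initialize the new structure before publishing it, so that readers
 * that see the new pointer also see initialized contents:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	p->a = 1;
 *	p->b = 2;
 *	rcu_assign_pointer(gp, p);
 */
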
/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);

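/*
 * Illustrative sketch of how a synchronize_*() primitive can be built
 * on this infrastructure (this mirrors the pattern used by the RCU
 * implementation; not a definition from this header):
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 */
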
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));

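/*
 * Updater-side sketch (hypothetical names, reusing the "struct foo"
 * and foo_reclaim() sketch above): replace the published pointer under
 * the update-side lock, then defer freeing of the old version:
 *
 *	spin_lock(&gp_lock);
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	if (old)
 *		call_rcu(&old->rcu, foo_reclaim);
 */
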
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context,
 * OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

#endif /* __LINUX_RCUPDATE_H */