[mirror_ubuntu-bionic-kernel.git] / include / linux / rcupdate.h
1da177e4 1/*
a71fca58 2 * Read-Copy Update mechanism for mutual exclusion
1da177e4
LT
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
87de1cfd
PM
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
1da177e4 17 *
01c1c660 18 * Copyright IBM Corporation, 2001
1da177e4
LT
19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
a71fca58 21 *
595182bc 22 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
1da177e4
LT
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
a71fca58 29 * http://lse.sourceforge.net/locking/rcupdate.html
1da177e4
LT
30 *
31 */
32
33#ifndef __LINUX_RCUPDATE_H
34#define __LINUX_RCUPDATE_H
35
99098751 36#include <linux/types.h>
1da177e4
LT
37#include <linux/cache.h>
38#include <linux/spinlock.h>
39#include <linux/threads.h>
1da177e4
LT
40#include <linux/cpumask.h>
41#include <linux/seqlock.h>
851a67b8 42#include <linux/lockdep.h>
4446a36f 43#include <linux/completion.h>
551d55a9 44#include <linux/debugobjects.h>
187f1882 45#include <linux/bug.h>
ca5ecddf 46#include <linux/compiler.h>
88c18630 47#include <asm/barrier.h>
1da177e4 48
7a754743 49extern int rcu_expedited; /* for sysctl */
e5ab6772
DY
50#ifdef CONFIG_RCU_TORTURE_TEST
51extern int rcutorture_runnable; /* for sysctl */
52#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
53
ad0dc7f9
PM
54enum rcutorture_type {
55 RCU_FLAVOR,
56 RCU_BH_FLAVOR,
57 RCU_SCHED_FLAVOR,
69c60455 58 RCU_TASKS_FLAVOR,
ad0dc7f9
PM
59 SRCU_FLAVOR,
60 INVALID_RCU_FLAVOR
61};
62
4a298656 63#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
ad0dc7f9
PM
64void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
65 unsigned long *gpnum, unsigned long *completed);
584dc4ce
TB
66void rcutorture_record_test_transition(void);
67void rcutorture_record_progress(unsigned long vernum);
68void do_trace_rcu_torture_read(const char *rcutorturename,
69 struct rcu_head *rhp,
70 unsigned long secs,
71 unsigned long c_old,
72 unsigned long c);
4a298656 73#else
ad0dc7f9
PM
74static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
75 int *flags,
76 unsigned long *gpnum,
77 unsigned long *completed)
78{
79 *flags = 0;
80 *gpnum = 0;
81 *completed = 0;
82}
4a298656
PM
83static inline void rcutorture_record_test_transition(void)
84{
85}
86static inline void rcutorture_record_progress(unsigned long vernum)
87{
88}
91afaf30 89#ifdef CONFIG_RCU_TRACE
584dc4ce
TB
90void do_trace_rcu_torture_read(const char *rcutorturename,
91 struct rcu_head *rhp,
92 unsigned long secs,
93 unsigned long c_old,
94 unsigned long c);
91afaf30 95#else
52494535
PM
96#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
97 do { } while (0)
91afaf30 98#endif
4a298656
PM
99#endif
100
e27fc964
TH
101#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
102#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b))
a3dc3fb1
PM
103#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
104#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
c0f4dfd4 105#define ulong2long(a) (*(long *)(&(a)))
a3dc3fb1 106
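/*
 * Illustrative sketch (not part of this header): these comparisons are
 * wraparound-safe, which matters for counters such as grace-period
 * numbers that may overflow an unsigned long.  The "gpnum" and
 * "completed" variables below are hypothetical.
 *
 *	unsigned long gpnum;		(most recently started grace period)
 *	unsigned long completed;	(most recently completed grace period)
 *
 *	if (ULONG_CMP_LT(completed, gpnum))
 *		return true;	(a grace period is in progress,
 *				 correct even across counter wrap)
 */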
03b042bf 107/* Exported common interfaces */
2c42818e
PM
108
109#ifdef CONFIG_PREEMPT_RCU
110
111/**
112 * call_rcu() - Queue an RCU callback for invocation after a grace period.
113 * @head: structure to be used for queueing the RCU updates.
114 * @func: actual callback function to be invoked after the grace period
115 *
116 * The callback function will be invoked some time after a full grace
117 * period elapses, in other words after all pre-existing RCU read-side
118 * critical sections have completed. However, the callback function
119 * might well execute concurrently with RCU read-side critical sections
120 * that started after call_rcu() was invoked. RCU read-side critical
121 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
122 * and may be nested.
f0a0e6f2
PM
123 *
124 * Note that all CPUs must agree that the grace period extended beyond
125 * all pre-existing RCU read-side critical sections. On systems with more
126 * than one CPU, this means that when "func()" is invoked, each CPU is
127 * guaranteed to have executed a full memory barrier since the end of its
128 * last RCU read-side critical section whose beginning preceded the call
129 * to call_rcu(). It also means that each CPU executing an RCU read-side
130 * critical section that continues beyond the start of "func()" must have
131 * executed a memory barrier after the call_rcu() but before the beginning
132 * of that RCU read-side critical section. Note that these guarantees
133 * include CPUs that are offline, idle, or executing in user mode, as
134 * well as CPUs that are executing in the kernel.
135 *
136 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
137 * resulting RCU callback function "func()", then both CPU A and CPU B are
138 * guaranteed to execute a full memory barrier during the time interval
139 * between the call to call_rcu() and the invocation of "func()" -- even
140 * if CPU A and CPU B are the same CPU (but again only if the system has
141 * more than one CPU).
2c42818e 142 */
584dc4ce
TB
143void call_rcu(struct rcu_head *head,
144 void (*func)(struct rcu_head *head));
2c42818e
PM
145
146#else /* #ifdef CONFIG_PREEMPT_RCU */
147
148/* In classic RCU, call_rcu() is just call_rcu_sched(). */
149#define call_rcu call_rcu_sched
150
151#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
152
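/*
 * Illustrative sketch (not part of this header): a typical call_rcu()
 * user embeds a struct rcu_head in the protected structure and passes a
 * callback that frees it.  "struct foo", foo_reclaim(), and old_fp
 * below are hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * After removing old_fp from all reader-visible paths:
 *
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */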
153/**
154 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
155 * @head: structure to be used for queueing the RCU updates.
156 * @func: actual callback function to be invoked after the grace period
157 *
158 * The callback function will be invoked some time after a full grace
159 * period elapses, in other words after all currently executing RCU
160 * read-side critical sections have completed. call_rcu_bh() assumes
161 * that the read-side critical sections end on completion of a softirq
162 * handler. This means that read-side critical sections in process
163 * context must not be interrupted by softirqs. This interface is to be
164 * used when most of the read-side critical sections are in softirq context.
165 * RCU read-side critical sections are delimited by :
166 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
167 * OR
168 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
169 * These may be nested.
f0a0e6f2
PM
170 *
171 * See the description of call_rcu() for more detailed information on
172 * memory ordering guarantees.
2c42818e 173 */
584dc4ce
TB
174void call_rcu_bh(struct rcu_head *head,
175 void (*func)(struct rcu_head *head));
2c42818e
PM
176
177/**
178 * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
179 * @head: structure to be used for queueing the RCU updates.
180 * @func: actual callback function to be invoked after the grace period
181 *
182 * The callback function will be invoked some time after a full grace
183 * period elapses, in other words after all currently executing RCU
184 * read-side critical sections have completed. call_rcu_sched() assumes
185 * that the read-side critical sections end on enabling of preemption
186 * or on voluntary preemption.
187 * RCU read-side critical sections are delimited by :
188 * - rcu_read_lock_sched() and rcu_read_unlock_sched(),
189 * OR
190 * anything that disables preemption.
191 * These may be nested.
f0a0e6f2
PM
192 *
193 * See the description of call_rcu() for more detailed information on
194 * memory ordering guarantees.
2c42818e 195 */
584dc4ce
TB
196void call_rcu_sched(struct rcu_head *head,
197 void (*func)(struct rcu_head *rcu));
2c42818e 198
584dc4ce 199void synchronize_sched(void);
03b042bf 200
8315f422
PM
201/**
202 * call_rcu_tasks() - Queue an RCU callback for invocation after a tasks-based grace period
203 * @head: structure to be used for queueing the RCU updates.
204 * @func: actual callback function to be invoked after the grace period
205 *
206 * The callback function will be invoked some time after a full grace
207 * period elapses, in other words after all currently executing RCU
208 * read-side critical sections have completed. call_rcu_tasks() assumes
209 * that the read-side critical sections end at a voluntary context
210 * switch (not a preemption!), entry into idle, or transition to usermode
211 * execution. As such, there are no read-side primitives analogous to
212 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
213 * to determine that all tasks have passed through a safe state, not so
214 * much for data-structure synchronization.
215 *
216 * See the description of call_rcu() for more detailed information on
217 * memory ordering guarantees.
218 */
219void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
53c6d4ed
PM
220void synchronize_rcu_tasks(void);
221void rcu_barrier_tasks(void);
8315f422 222
a3dc3fb1
PM
223#ifdef CONFIG_PREEMPT_RCU
224
584dc4ce
TB
225void __rcu_read_lock(void);
226void __rcu_read_unlock(void);
227void rcu_read_unlock_special(struct task_struct *t);
7b0b759b
PM
228void synchronize_rcu(void);
229
a3dc3fb1
PM
230/*
231 * Defined as a macro as it is a very low level header included from
232 * areas that don't even know about current. This gives the rcu_read_lock()
233 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
234 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
235 */
236#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
237
7b0b759b
PM
238#else /* #ifdef CONFIG_PREEMPT_RCU */
239
240static inline void __rcu_read_lock(void)
241{
242 preempt_disable();
243}
244
245static inline void __rcu_read_unlock(void)
246{
247 preempt_enable();
248}
249
250static inline void synchronize_rcu(void)
251{
252 synchronize_sched();
253}
254
255static inline int rcu_preempt_depth(void)
256{
257 return 0;
258}
259
260#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
261
262/* Internal to kernel */
584dc4ce
TB
263void rcu_init(void);
264void rcu_sched_qs(int cpu);
265void rcu_bh_qs(int cpu);
266void rcu_check_callbacks(int cpu, int user);
7b0b759b 267struct notifier_block;
584dc4ce
TB
268void rcu_idle_enter(void);
269void rcu_idle_exit(void);
270void rcu_irq_enter(void);
271void rcu_irq_exit(void);
2b1d5024 272
61f38db3
RR
273#ifdef CONFIG_RCU_STALL_COMMON
274void rcu_sysrq_start(void);
275void rcu_sysrq_end(void);
276#else /* #ifdef CONFIG_RCU_STALL_COMMON */
277static inline void rcu_sysrq_start(void)
278{
279}
280static inline void rcu_sysrq_end(void)
281{
282}
283#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
284
2b1d5024 285#ifdef CONFIG_RCU_USER_QS
584dc4ce
TB
286void rcu_user_enter(void);
287void rcu_user_exit(void);
2b1d5024
FW
288#else
289static inline void rcu_user_enter(void) { }
290static inline void rcu_user_exit(void) { }
4d9a5d43
FW
291static inline void rcu_user_hooks_switch(struct task_struct *prev,
292 struct task_struct *next) { }
2b1d5024
FW
293#endif /* CONFIG_RCU_USER_QS */
294
8a2ecf47
PM
295/**
296 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
297 * @a: Code that RCU needs to pay attention to.
298 *
299 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
300 * in the inner idle loop, that is, between the rcu_idle_enter() and
301 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
302 * critical sections. However, things like powertop need tracepoints
303 * in the inner idle loop.
304 *
305 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
306 * will tell RCU that it needs to pay attention, invoke its argument
307 * (in this example, a call to the do_something_with_RCU() function),
308 * and then tell RCU to go back to ignoring this CPU. It is permissible
309 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
310 * quite limited. If deeper nesting is required, it will be necessary
311 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
8a2ecf47
PM
312 */
313#define RCU_NONIDLE(a) \
314 do { \
b4270ee3 315 rcu_irq_enter(); \
8a2ecf47 316 do { a; } while (0); \
b4270ee3 317 rcu_irq_exit(); \
8a2ecf47
PM
318 } while (0)
319
8315f422
PM
320/*
321 * Note a voluntary context switch for RCU-tasks benefit. This is a
322 * macro rather than an inline function to avoid #include hell.
323 */
324#ifdef CONFIG_TASKS_RCU
3f95aa81
PM
325#define TASKS_RCU(x) x
326extern struct srcu_struct tasks_rcu_exit_srcu;
8315f422
PM
327#define rcu_note_voluntary_context_switch(t) \
328 do { \
8315f422
PM
329 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
330 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
8315f422
PM
331 } while (0)
332#else /* #ifdef CONFIG_TASKS_RCU */
3f95aa81 333#define TASKS_RCU(x) do { } while (0)
8315f422
PM
334#define rcu_note_voluntary_context_switch(t) do { } while (0)
335#endif /* #else #ifdef CONFIG_TASKS_RCU */
336
bde6c3aa
PM
337/**
338 * cond_resched_rcu_qs - Report potential quiescent states to RCU
339 *
340 * This macro resembles cond_resched(), except that it is defined to
341 * report potential quiescent states to RCU-tasks even if the cond_resched()
342 * machinery were to be shut off, as some advocate for PREEMPT kernels.
343 */
344#define cond_resched_rcu_qs() \
345do { \
346 rcu_note_voluntary_context_switch(current); \
347 cond_resched(); \
348} while (0)
349
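/*
 * Illustrative sketch (not part of this header): a long-running kernel
 * loop can report quiescent states by invoking cond_resched_rcu_qs()
 * once per pass.  The loop body and process_item() are hypothetical.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched_rcu_qs();
 *	}
 */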
cc6783f7 350#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
584dc4ce 351bool __rcu_is_watching(void);
cc6783f7
PM
352#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
353
2c42818e
PM
354/*
355 * Infrastructure to implement the synchronize_() primitives in
356 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
357 */
358
359typedef void call_rcu_func_t(struct rcu_head *head,
360 void (*func)(struct rcu_head *head));
361void wait_rcu_gp(call_rcu_func_t crf);
362
f41d911f 363#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
64db4cff 364#include <linux/rcutree.h>
127781d1 365#elif defined(CONFIG_TINY_RCU)
9b1d82fa 366#include <linux/rcutiny.h>
64db4cff
PM
367#else
368#error "Unknown RCU implementation specified to kernel configuration"
6b3ef48a 369#endif
01c1c660 370
551d55a9
MD
371/*
372 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
373 * initialization and destruction of rcu_head on the stack. rcu_head structures
374 * allocated dynamically in the heap or defined statically don't need any
375 * initialization.
376 */
377#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
546a9d85
PM
378void init_rcu_head(struct rcu_head *head);
379void destroy_rcu_head(struct rcu_head *head);
584dc4ce
TB
380void init_rcu_head_on_stack(struct rcu_head *head);
381void destroy_rcu_head_on_stack(struct rcu_head *head);
551d55a9 382#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
546a9d85
PM
383static inline void init_rcu_head(struct rcu_head *head)
384{
385}
386
387static inline void destroy_rcu_head(struct rcu_head *head)
388{
389}
390
4376030a
MD
391static inline void init_rcu_head_on_stack(struct rcu_head *head)
392{
393}
394
395static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
396{
397}
551d55a9 398#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
4376030a 399
c0d6d01b
PM
400#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
401bool rcu_lockdep_current_cpu_online(void);
402#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
403static inline bool rcu_lockdep_current_cpu_online(void)
404{
405 return 1;
406}
407#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
408
bc33f24b 409#ifdef CONFIG_DEBUG_LOCK_ALLOC
632ee200 410
00f49e57
FW
411static inline void rcu_lock_acquire(struct lockdep_map *map)
412{
fb9edbe9 413 lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
00f49e57
FW
414}
415
416static inline void rcu_lock_release(struct lockdep_map *map)
417{
00f49e57
FW
418 lock_release(map, 1, _THIS_IP_);
419}
420
bc33f24b 421extern struct lockdep_map rcu_lock_map;
632ee200 422extern struct lockdep_map rcu_bh_lock_map;
632ee200 423extern struct lockdep_map rcu_sched_lock_map;
24ef659a 424extern struct lockdep_map rcu_callback_map;
a235c091 425int debug_lockdep_rcu_enabled(void);
54dbf96c 426
632ee200 427/**
ca5ecddf 428 * rcu_read_lock_held() - might we be in RCU read-side critical section?
632ee200 429 *
d20200b5
PM
430 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
431 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
632ee200 432 * this assumes we are in an RCU read-side critical section unless it can
ca5ecddf
PM
433 * prove otherwise. This is useful for debug checks in functions that
434 * require that they be called within an RCU read-side critical section.
54dbf96c 435 *
ca5ecddf 436 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
32c141a0 437 * and while lockdep is disabled.
3842a083
PM
438 *
439 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
440 * occur in the same context, for example, it is illegal to invoke
441 * rcu_read_unlock() in process context if the matching rcu_read_lock()
442 * was invoked from within an irq handler.
c0d6d01b
PM
443 *
444 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
445 * offline from an RCU perspective, so check for those as well.
632ee200
PM
446 */
447static inline int rcu_read_lock_held(void)
448{
54dbf96c
PM
449 if (!debug_lockdep_rcu_enabled())
450 return 1;
5c173eb8 451 if (!rcu_is_watching())
e6b80a3b 452 return 0;
c0d6d01b
PM
453 if (!rcu_lockdep_current_cpu_online())
454 return 0;
54dbf96c 455 return lock_is_held(&rcu_lock_map);
632ee200
PM
456}
457
e3818b8d
PM
458/*
459 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
460 * hell.
632ee200 461 */
584dc4ce 462int rcu_read_lock_bh_held(void);
632ee200
PM
463
464/**
ca5ecddf 465 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
632ee200 466 *
d20200b5
PM
467 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
468 * RCU-sched read-side critical section. In absence of
469 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
470 * critical section unless it can prove otherwise. Note that disabling
471 * of preemption (including disabling irqs) counts as an RCU-sched
ca5ecddf
PM
472 * read-side critical section. This is useful for debug checks in functions
473 * that require that they be called within an RCU-sched read-side
474 * critical section.
54dbf96c 475 *
32c141a0
PM
476 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
477 * and while lockdep is disabled.
e6b80a3b
FW
478 *
479 * Note that if the CPU is in the idle loop from an RCU point of
480 * view (ie: that we are in the section between rcu_idle_enter() and
481 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
482 * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
483 * that are in such a section, considering these as in extended quiescent
484 * state, so such a CPU is effectively never in an RCU read-side critical
485 * section regardless of what RCU primitives it invokes. This state of
486 * affairs is required --- we need to keep an RCU-free window in idle
487 * where the CPU may possibly enter into low power mode. This way we can
488 * notice an extended quiescent state to other CPUs that started a grace
489 * period. Otherwise we would delay any grace period as long as we run in
490 * the idle task.
c0d6d01b
PM
491 *
492 * Similarly, we avoid claiming an SRCU read lock held if the current
493 * CPU is offline.
632ee200 494 */
bdd4e85d 495#ifdef CONFIG_PREEMPT_COUNT
632ee200
PM
496static inline int rcu_read_lock_sched_held(void)
497{
498 int lockdep_opinion = 0;
499
54dbf96c
PM
500 if (!debug_lockdep_rcu_enabled())
501 return 1;
5c173eb8 502 if (!rcu_is_watching())
e6b80a3b 503 return 0;
c0d6d01b
PM
504 if (!rcu_lockdep_current_cpu_online())
505 return 0;
632ee200
PM
506 if (debug_locks)
507 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
0cff810f 508 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
632ee200 509}
bdd4e85d 510#else /* #ifdef CONFIG_PREEMPT_COUNT */
e6033e3b
PM
511static inline int rcu_read_lock_sched_held(void)
512{
513 return 1;
632ee200 514}
bdd4e85d 515#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
632ee200
PM
516
517#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
518
d8ab29f8
PM
519# define rcu_lock_acquire(a) do { } while (0)
520# define rcu_lock_release(a) do { } while (0)
632ee200
PM
521
522static inline int rcu_read_lock_held(void)
523{
524 return 1;
525}
526
527static inline int rcu_read_lock_bh_held(void)
528{
529 return 1;
530}
531
bdd4e85d 532#ifdef CONFIG_PREEMPT_COUNT
632ee200
PM
533static inline int rcu_read_lock_sched_held(void)
534{
bbad9379 535 return preempt_count() != 0 || irqs_disabled();
632ee200 536}
bdd4e85d 537#else /* #ifdef CONFIG_PREEMPT_COUNT */
e6033e3b
PM
538static inline int rcu_read_lock_sched_held(void)
539{
540 return 1;
632ee200 541}
bdd4e85d 542#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
632ee200
PM
543
544#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
545
546#ifdef CONFIG_PROVE_RCU
547
4221a991
TH
548/**
549 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
550 * @c: condition to check
b3fbab05 551 * @s: informative message
4221a991 552 */
b3fbab05 553#define rcu_lockdep_assert(c, s) \
2b3fc35f 554 do { \
7ccaba53 555 static bool __section(.data.unlikely) __warned; \
2b3fc35f
LJ
556 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
557 __warned = true; \
b3fbab05 558 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
2b3fc35f
LJ
559 } \
560 } while (0)
561
50406b98
PM
562#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
563static inline void rcu_preempt_sleep_check(void)
564{
565 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
5cf05ad7 566 "Illegal context switch in RCU read-side critical section");
50406b98
PM
567}
568#else /* #ifdef CONFIG_PROVE_RCU */
569static inline void rcu_preempt_sleep_check(void)
570{
571}
572#endif /* #else #ifdef CONFIG_PROVE_RCU */
573
b3fbab05
PM
574#define rcu_sleep_check() \
575 do { \
50406b98 576 rcu_preempt_sleep_check(); \
b3fbab05 577 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
41f4abd9 578 "Illegal context switch in RCU-bh read-side critical section"); \
b3fbab05 579 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
41f4abd9 580 "Illegal context switch in RCU-sched read-side critical section"); \
b3fbab05
PM
581 } while (0)
582
ca5ecddf
PM
583#else /* #ifdef CONFIG_PROVE_RCU */
584
b3fbab05
PM
585#define rcu_lockdep_assert(c, s) do { } while (0)
586#define rcu_sleep_check() do { } while (0)
ca5ecddf
PM
587
588#endif /* #else #ifdef CONFIG_PROVE_RCU */
589
590/*
591 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
592 * and rcu_assign_pointer(). Some of these could be folded into their
593 * callers, but they are left separate in order to ease introduction of
594 * multiple flavors of pointers to match the multiple flavors of RCU
595 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
596 * the future.
597 */
53ecfba2
PM
598
599#ifdef __CHECKER__
600#define rcu_dereference_sparse(p, space) \
601 ((void)(((typeof(*p) space *)p) == p))
602#else /* #ifdef __CHECKER__ */
603#define rcu_dereference_sparse(p, space)
604#endif /* #else #ifdef __CHECKER__ */
605
ca5ecddf 606#define __rcu_access_pointer(p, space) \
0adab9b9
JP
607({ \
608 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
609 rcu_dereference_sparse(p, space); \
610 ((typeof(*p) __force __kernel *)(_________p1)); \
611})
ca5ecddf 612#define __rcu_dereference_check(p, c, space) \
0adab9b9
JP
613({ \
614 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
615 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
616 rcu_dereference_sparse(p, space); \
617 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
618 ((typeof(*p) __force __kernel *)(_________p1)); \
619})
ca5ecddf 620#define __rcu_dereference_protected(p, c, space) \
0adab9b9
JP
621({ \
622 rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
623 rcu_dereference_sparse(p, space); \
624 ((typeof(*p) __force __kernel *)(p)); \
625})
ca5ecddf 626
a4dd9925 627#define __rcu_access_index(p, space) \
0adab9b9
JP
628({ \
629 typeof(p) _________p1 = ACCESS_ONCE(p); \
630 rcu_dereference_sparse(p, space); \
631 (_________p1); \
632})
ca5ecddf 633#define __rcu_dereference_index_check(p, c) \
0adab9b9
JP
634({ \
635 typeof(p) _________p1 = ACCESS_ONCE(p); \
636 rcu_lockdep_assert(c, \
637 "suspicious rcu_dereference_index_check() usage"); \
638 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
639 (_________p1); \
640})
462225ae
PM
641
642/**
643 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
644 * @v: The value to statically initialize with.
645 */
646#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
647
648/**
649 * rcu_assign_pointer() - assign to RCU-protected pointer
650 * @p: pointer to assign to
651 * @v: value to assign (publish)
652 *
653 * Assigns the specified value to the specified RCU-protected
654 * pointer, ensuring that any concurrent RCU readers will see
655 * any prior initialization.
656 *
657 * Inserts memory barriers on architectures that require them
658 * (which is most of them), and also prevents the compiler from
659 * reordering the code that initializes the structure after the pointer
660 * assignment. More importantly, this call documents which pointers
661 * will be dereferenced by RCU read-side code.
662 *
663 * In some special cases, you may use RCU_INIT_POINTER() instead
664 * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
665 * to the fact that it does not constrain either the CPU or the compiler.
666 * That said, using RCU_INIT_POINTER() when you should have used
667 * rcu_assign_pointer() is a very bad thing that results in
668 * impossible-to-diagnose memory corruption. So please be careful.
669 * See the RCU_INIT_POINTER() comment header for details.
670 *
671 * Note that rcu_assign_pointer() evaluates each of its arguments only
672 * once, appearances notwithstanding. One of the "extra" evaluations
673 * is in typeof() and the other visible only to sparse (__CHECKER__),
674 * neither of which actually execute the argument. As with most cpp
675 * macros, this execute-arguments-only-once property is important, so
676 * please be careful when making changes to rcu_assign_pointer() and the
677 * other macros that it invokes.
678 */
88c18630 679#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
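/*
 * Illustrative sketch (not part of this header): publishing a newly
 * initialized structure so that concurrent readers see it fully
 * initialized.  "struct foo" and the global "struct foo __rcu *gp"
 * pointer are hypothetical.
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (p) {
 *		p->a = 1;
 *		p->b = 2;
 *		rcu_assign_pointer(gp, p);
 *	}
 *
 * The rcu_assign_pointer() orders the initialization of *p before the
 * store to gp, so any reader that sees the new gp also sees the new
 * values of p->a and p->b.
 */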
ca5ecddf
PM
680
681/**
682 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
683 * @p: The pointer to read
684 *
685 * Return the value of the specified RCU-protected pointer, but omit the
686 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
687 * when the value of this pointer is accessed, but the pointer is not
688 * dereferenced, for example, when testing an RCU-protected pointer against
689 * NULL. Although rcu_access_pointer() may also be used in cases where
690 * update-side locks prevent the value of the pointer from changing, you
691 * should instead use rcu_dereference_protected() for this use case.
5e1ee6e1
PM
692 *
693 * It is also permissible to use rcu_access_pointer() when read-side
694 * access to the pointer was removed at least one grace period ago, as
695 * is the case in the context of the RCU callback that is freeing up
696 * the data, or after a synchronize_rcu() returns. This can be useful
697 * when tearing down multi-linked structures after a grace period
698 * has elapsed.
ca5ecddf
PM
699 */
700#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
701
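/*
 * Illustrative sketch (not part of this header): testing an
 * RCU-protected pointer against NULL without dereferencing it.  The
 * "gp" pointer is hypothetical.
 *
 *	if (!rcu_access_pointer(gp))
 *		return -ENOENT;
 */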
632ee200 702/**
ca5ecddf 703 * rcu_dereference_check() - rcu_dereference with debug checking
c08c68dd
DH
704 * @p: The pointer to read, prior to dereferencing
705 * @c: The conditions under which the dereference will take place
632ee200 706 *
c08c68dd 707 * Do an rcu_dereference(), but check that the conditions under which the
ca5ecddf
PM
708 * dereference will take place are correct. Typically the conditions
709 * indicate the various locking conditions that should be held at that
710 * point. The check should return true if the conditions are satisfied.
711 * An implicit check for being in an RCU read-side critical section
712 * (rcu_read_lock()) is included.
c08c68dd
DH
713 *
714 * For example:
715 *
ca5ecddf 716 * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
c08c68dd
DH
717 *
718 * could be used to indicate to lockdep that foo->bar may only be dereferenced
ca5ecddf 719 * if either rcu_read_lock() is held, or that the lock required to replace
c08c68dd
DH
720 * the bar struct at foo->bar is held.
721 *
722 * Note that the list of conditions may also include indications of when a lock
723 * need not be held, for example during initialisation or destruction of the
724 * target struct:
725 *
ca5ecddf 726 * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
c08c68dd 727 * atomic_read(&foo->usage) == 0);
ca5ecddf
PM
728 *
729 * Inserts memory barriers on architectures that require them
730 * (currently only the Alpha), prevents the compiler from refetching
731 * (and from merging fetches), and, more importantly, documents exactly
732 * which pointers are protected by RCU and checks that the pointer is
733 * annotated as __rcu.
632ee200
PM
734 */
735#define rcu_dereference_check(p, c) \
ca5ecddf
PM
736 __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
737
738/**
739 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
740 * @p: The pointer to read, prior to dereferencing
741 * @c: The conditions under which the dereference will take place
742 *
743 * This is the RCU-bh counterpart to rcu_dereference_check().
744 */
745#define rcu_dereference_bh_check(p, c) \
746 __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
632ee200 747
b62730ba 748/**
ca5ecddf
PM
749 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
750 * @p: The pointer to read, prior to dereferencing
751 * @c: The conditions under which the dereference will take place
752 *
753 * This is the RCU-sched counterpart to rcu_dereference_check().
754 */
755#define rcu_dereference_sched_check(p, c) \
756 __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
757 __rcu)
758
759#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
760
12bcbe66
SR
761/*
762 * The tracing infrastructure traces RCU (we want that), but unfortunately
763 * some of the RCU checks causes tracing to lock up the system.
764 *
765 * The tracing version of rcu_dereference_raw() must not call
766 * rcu_read_lock_held().
767 */
768#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
769
a4dd9925
PM
770/**
771 * rcu_access_index() - fetch RCU index with no dereferencing
772 * @p: The index to read
773 *
774 * Return the value of the specified RCU-protected index, but omit the
775 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
776 * when the value of this index is accessed, but the index is not
777 * dereferenced, for example, when testing an RCU-protected index against
778 * -1. Although rcu_access_index() may also be used in cases where
779 * update-side locks prevent the value of the index from changing, you
780 * should instead use rcu_dereference_index_protected() for this use case.
781 */
782#define rcu_access_index(p) __rcu_access_index((p), __rcu)
783
ca5ecddf
PM
784/**
785 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
786 * @p: The pointer to read, prior to dereferencing
787 * @c: The conditions under which the dereference will take place
788 *
789 * Similar to rcu_dereference_check(), but omits the sparse checking.
790 * This allows rcu_dereference_index_check() to be used on integers,
791 * which can then be used as array indices. Attempting to use
792 * rcu_dereference_check() on an integer will give compiler warnings
793 * because the sparse address-space mechanism relies on dereferencing
794 * the RCU-protected pointer. Dereferencing integers is not something
795 * that even gcc will put up with.
796 *
797 * Note that this function does not implicitly check for RCU read-side
798 * critical sections. If this function gains lots of uses, it might
799 * make sense to provide versions for each flavor of RCU, but it does
800 * not make sense as of early 2010.
801 */
802#define rcu_dereference_index_check(p, c) \
803 __rcu_dereference_index_check((p), (c))
804
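/*
 * Illustrative sketch (not part of this header): fetching an
 * RCU-protected array index.  The "gp_idx" index, "gp_lock", and
 * "gp_array" below are hypothetical; note that the RCU read-side
 * condition must be supplied explicitly.
 *
 *	idx = rcu_dereference_index_check(gp_idx,
 *					  rcu_read_lock_held() ||
 *					  lockdep_is_held(&gp_lock));
 *	val = gp_array[idx];
 */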
805/**
806 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
807 * @p: The pointer to read, prior to dereferencing
808 * @c: The conditions under which the dereference will take place
b62730ba
PM
809 *
810 * Return the value of the specified RCU-protected pointer, but omit
811 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
812 * is useful in cases where update-side locks prevent the value of the
813 * pointer from changing. Please note that this primitive does -not-
814 * prevent the compiler from repeating this reference or combining it
815 * with other references, so it should not be used without protection
816 * of appropriate locks.
ca5ecddf
PM
817 *
818 * This function is only for update-side use. Using this function
819 * when protected only by rcu_read_lock() will result in infrequent
820 * but very ugly failures.
b62730ba
PM
821 */
822#define rcu_dereference_protected(p, c) \
ca5ecddf 823 __rcu_dereference_protected((p), (c), __rcu)
b62730ba 824
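/*
 * Illustrative sketch (not part of this header): update-side access to
 * an RCU-protected pointer while holding the lock that guards updates.
 * The "gp" pointer, "gp_lock", and foo_reclaim() callback are
 * hypothetical.
 *
 *	spin_lock(&gp_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	if (p) {
 *		rcu_assign_pointer(gp, NULL);
 *		call_rcu(&p->rcu, foo_reclaim);
 *	}
 *	spin_unlock(&gp_lock);
 */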
bc33f24b 825
b62730ba 826/**
ca5ecddf
PM
827 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
828 * @p: The pointer to read, prior to dereferencing
b62730ba 829 *
ca5ecddf 830 * This is a simple wrapper around rcu_dereference_check().
b62730ba 831 */
ca5ecddf 832#define rcu_dereference(p) rcu_dereference_check(p, 0)
b62730ba 833
1da177e4 834/**
ca5ecddf
PM
835 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
836 * @p: The pointer to read, prior to dereferencing
837 *
838 * Makes rcu_dereference_check() do the dirty work.
839 */
840#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
841
842/**
843 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
844 * @p: The pointer to read, prior to dereferencing
845 *
846 * Makes rcu_dereference_check() do the dirty work.
847 */
848#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
849
850/**
851 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
1da177e4 852 *
9b06e818 853 * When synchronize_rcu() is invoked on one CPU while other CPUs
1da177e4 854 * are within RCU read-side critical sections, then the
9b06e818 855 * synchronize_rcu() is guaranteed to block until after all the other
1da177e4
LT
856 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
857 * on one CPU while other CPUs are within RCU read-side critical
858 * sections, invocation of the corresponding RCU callback is deferred
859 * until after all the other CPUs exit their critical sections.
860 *
861 * Note, however, that RCU callbacks are permitted to run concurrently
77d8485a 862 * with new RCU read-side critical sections. One way that this can happen
1da177e4
LT
863 * is via the following sequence of events: (1) CPU 0 enters an RCU
864 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
865 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
866 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
867 * callback is invoked. This is legal, because the RCU read-side critical
868 * section that was running concurrently with the call_rcu() (and which
869 * therefore might be referencing something that the corresponding RCU
870 * callback would free up) has completed before the corresponding
871 * RCU callback is invoked.
872 *
873 * RCU read-side critical sections may be nested. Any deferred actions
874 * will be deferred until the outermost RCU read-side critical section
875 * completes.
876 *
9079fd7c
PM
877 * You can avoid reading and understanding the next paragraph by
878 * following this rule: don't put anything in an rcu_read_lock() RCU
879 * read-side critical section that would block in a !PREEMPT kernel.
880 * But if you want the full story, read on!
881 *
ab74fdfd
PM
882 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
883 * it is illegal to block while in an RCU read-side critical section.
884 * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
885 * kernel builds, RCU read-side critical sections may be preempted,
886 * but explicit blocking is illegal. Finally, in preemptible RCU
887 * implementations in real-time (with -rt patchset) kernel builds, RCU
888 * read-side critical sections may be preempted and they may also block, but
889 * only when acquiring spinlocks that are subject to priority inheritance.
1da177e4 890 */
bc33f24b
PM
891static inline void rcu_read_lock(void)
892{
893 __rcu_read_lock();
894 __acquire(RCU);
d8ab29f8 895 rcu_lock_acquire(&rcu_lock_map);
5c173eb8 896 rcu_lockdep_assert(rcu_is_watching(),
bde23c68 897 "rcu_read_lock() used illegally while idle");
bc33f24b 898}
1da177e4 899
1da177e4
LT
900/*
901 * So where is rcu_write_lock()? It does not exist, as there is no
902 * way for writers to lock out RCU readers. This is a feature, not
903 * a bug -- this property is what provides RCU's performance benefits.
904 * Of course, writers must coordinate with each other. The normal
905 * spinlock primitives work well for this, but any other technique may be
906 * used as well. RCU does not care how the writers keep out of each
907 * others' way, as long as they do so.
908 */
3d76c082
PM
909
910/**
ca5ecddf 911 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
3d76c082 912 *
f27bc487
PM
913 * In most situations, rcu_read_unlock() is immune from deadlock.
914 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
915 * is responsible for deboosting, which it does via rt_mutex_unlock().
916 * Unfortunately, this function acquires the scheduler's runqueue and
917 * priority-inheritance spinlocks. This means that deadlock could result
918 * if the caller of rcu_read_unlock() already holds one of these locks or
919 * any lock that is ever acquired while holding them.
920 *
921 * That said, RCU readers are never priority boosted unless they were
922 * preempted. Therefore, one way to avoid deadlock is to make sure
923 * that preemption never happens within any RCU read-side critical
924 * section whose outermost rcu_read_unlock() is called with one of
925 * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
926 * a number of ways, for example, by invoking preempt_disable() before
927 * critical section's outermost rcu_read_lock().
928 *
929 * Given that the set of locks acquired by rt_mutex_unlock() might change
930 * at any time, a somewhat more future-proofed approach is to make sure
931 * that preemption never happens within any RCU read-side critical
932 * section whose outermost rcu_read_unlock() is called with irqs disabled.
933 * This approach relies on the fact that rt_mutex_unlock() currently only
934 * acquires irq-disabled locks.
935 *
936 * The second of these two approaches is best in most situations,
937 * however, the first approach can also be useful, at least to those
938 * developers willing to keep abreast of the set of locks acquired by
939 * rt_mutex_unlock().
940 *
3d76c082
PM
941 * See rcu_read_lock() for more information.
942 */
bc33f24b
PM
943static inline void rcu_read_unlock(void)
944{
5c173eb8 945 rcu_lockdep_assert(rcu_is_watching(),
bde23c68 946 "rcu_read_unlock() used illegally while idle");
d8ab29f8 947 rcu_lock_release(&rcu_lock_map);
bc33f24b
PM
948 __release(RCU);
949 __rcu_read_unlock();
950}
1da177e4
LT
951
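/*
 * Illustrative sketch (not part of this header): a typical RCU reader
 * brackets its accesses with rcu_read_lock()/rcu_read_unlock() and
 * fetches the protected pointer via rcu_dereference().  The "gp"
 * pointer and "struct foo" are hypothetical.
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The pointer p must not be used after rcu_read_unlock(), because the
 * updater is then free to invoke its callback and free the structure.
 */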
952/**
ca5ecddf 953 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
1da177e4
LT
954 *
955 * This is the equivalent of rcu_read_lock(), but to be used when updates
ca5ecddf
PM
956 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
957 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
958 * softirq handler to be a quiescent state, a process in an RCU read-side
959 * critical section must be protected by disabling softirqs. Read-side
960 * critical sections in interrupt context can use just rcu_read_lock(),
961 * though this should at least be commented to avoid confusing people
962 * reading the code.
3842a083
PM
963 *
964 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
965 * must occur in the same context, for example, it is illegal to invoke
966 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
967 * was invoked from some other task.
1da177e4 968 */
bc33f24b
PM
969static inline void rcu_read_lock_bh(void)
970{
6206ab9b 971 local_bh_disable();
bc33f24b 972 __acquire(RCU_BH);
d8ab29f8 973 rcu_lock_acquire(&rcu_bh_lock_map);
5c173eb8 974 rcu_lockdep_assert(rcu_is_watching(),
bde23c68 975 "rcu_read_lock_bh() used illegally while idle");
bc33f24b 976}
1da177e4
LT
977
978/*
979 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
980 *
981 * See rcu_read_lock_bh() for more information.
982 */
bc33f24b
PM
983static inline void rcu_read_unlock_bh(void)
984{
5c173eb8 985 rcu_lockdep_assert(rcu_is_watching(),
bde23c68 986 "rcu_read_unlock_bh() used illegally while idle");
d8ab29f8 987 rcu_lock_release(&rcu_bh_lock_map);
bc33f24b 988 __release(RCU_BH);
6206ab9b 989 local_bh_enable();
bc33f24b 990}
1da177e4 991
1c50b728 992/**
ca5ecddf 993 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
1c50b728 994 *
ca5ecddf
PM
995 * This is the equivalent of rcu_read_lock(), but to be used when updates
996 * are being done using call_rcu_sched() or synchronize_rcu_sched().
997 * Read-side critical sections can also be introduced by anything that
998 * disables preemption, including local_irq_disable() and friends.
3842a083
PM
999 *
1000 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
1001 * must occur in the same context, for example, it is illegal to invoke
1002 * rcu_read_unlock_sched() from process context if the matching
1003 * rcu_read_lock_sched() was invoked from an NMI handler.
1c50b728 1004 */
d6714c22
PM
1005static inline void rcu_read_lock_sched(void)
1006{
1007 preempt_disable();
bc33f24b 1008 __acquire(RCU_SCHED);
d8ab29f8 1009 rcu_lock_acquire(&rcu_sched_lock_map);
5c173eb8 1010 rcu_lockdep_assert(rcu_is_watching(),
bde23c68 1011 "rcu_read_lock_sched() used illegally while idle");
d6714c22 1012}
1eba8f84
PM
1013
1014/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
7c614d64 1015static inline notrace void rcu_read_lock_sched_notrace(void)
d6714c22
PM
1016{
1017 preempt_disable_notrace();
bc33f24b 1018 __acquire(RCU_SCHED);
d6714c22 1019}
1c50b728
MD
1020
1021/*
1022 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
1023 *
1024 * See rcu_read_lock_sched for more information.
1025 */
d6714c22
PM
1026static inline void rcu_read_unlock_sched(void)
1027{
5c173eb8 1028 rcu_lockdep_assert(rcu_is_watching(),
bde23c68 1029 "rcu_read_unlock_sched() used illegally while idle");
d8ab29f8 1030 rcu_lock_release(&rcu_sched_lock_map);
bc33f24b 1031 __release(RCU_SCHED);
d6714c22
PM
1032 preempt_enable();
1033}
1eba8f84
PM
1034
1035/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
7c614d64 1036static inline notrace void rcu_read_unlock_sched_notrace(void)
d6714c22 1037{
bc33f24b 1038 __release(RCU_SCHED);
d6714c22
PM
1039 preempt_enable_notrace();
1040}
1c50b728 1041
ca5ecddf
PM
1042/**
1043 * RCU_INIT_POINTER() - initialize an RCU protected pointer
1044 *
6846c0c5
PM
1045 * Initialize an RCU-protected pointer in special cases where readers
1046 * do not need ordering constraints on the CPU or the compiler. These
1047 * special cases are:
1048 *
1049 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
1050 * 2. The caller has taken whatever steps are required to prevent
1051 * RCU readers from concurrently accessing this pointer -or-
1052 * 3. The referenced data structure has already been exposed to
1053 * readers either at compile time or via rcu_assign_pointer() -and-
1054 * a. You have not made -any- reader-visible changes to
1055 * this structure since then -or-
1056 * b. It is OK for readers accessing this structure from its
1057 * new location to see the old state of the structure. (For
1058 * example, the changes were to statistical counters or to
1059 * other state where exact synchronization is not required.)
1060 *
1061 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
1062 * result in impossible-to-diagnose memory corruption. As in the structures
1063 * will look OK in crash dumps, but any concurrent RCU readers might
1064 * see pre-initialized values of the referenced data structure. So
1065 * please be very careful how you use RCU_INIT_POINTER()!!!
1066 *
1067 * If you are creating an RCU-protected linked structure that is accessed
1068 * by a single external-to-structure RCU-protected pointer, then you may
1069 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
1070 * pointers, but you must use rcu_assign_pointer() to initialize the
1071 * external-to-structure pointer -after- you have completely initialized
1072 * the reader-accessible portions of the linked structure.
71a9b269
PM
1073 *
1074 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
1075 * ordering guarantees for either the CPU or the compiler.
ca5ecddf
PM
1076 */
1077#define RCU_INIT_POINTER(p, v) \
d1b88eb9 1078 do { \
462225ae 1079 p = RCU_INITIALIZER(v); \
d1b88eb9 1080 } while (0)
9ab1544e 1081
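/*
 * Illustrative sketch (not part of this header): NULLing out an
 * RCU-protected pointer (case 1 above), where no ordering against prior
 * initialization is needed.  The "gp" pointer and "gp_lock" are
 * hypothetical.
 *
 *	spin_lock(&gp_lock);
 *	RCU_INIT_POINTER(gp, NULL);
 *	spin_unlock(&gp_lock);
 */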
172708d0
PM
1082/**
1083 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
1084 *
1085 * GCC-style initialization for an RCU-protected pointer in a structure field.
1086 */
1087#define RCU_POINTER_INITIALIZER(p, v) \
462225ae 1088 .p = RCU_INITIALIZER(v)
9ab1544e 1089
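/*
 * Illustrative sketch (not part of this header): static initialization
 * of an RCU-protected pointer field using a designated initializer.
 * "struct foo_head", its "first" field, and default_foo are
 * hypothetical.
 *
 *	struct foo_head {
 *		struct foo __rcu *first;
 *	};
 *
 *	static struct foo default_foo;
 *	static struct foo_head head = {
 *		RCU_POINTER_INITIALIZER(first, &default_foo),
 *	};
 */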
d8169d4c
JE
1090/*
1091 * Does the specified offset indicate that the corresponding rcu_head
1092 * structure can be handled by kfree_rcu()?
1093 */
1094#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
1095
1096/*
1097 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
1098 */
1099#define __kfree_rcu(head, offset) \
1100 do { \
1101 BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
4fa3b6cb 1102 kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
d8169d4c
JE
1103 } while (0)
1104
9ab1544e
LJ
1105/**
1106 * kfree_rcu() - kfree an object after a grace period.
1107 * @ptr: pointer to kfree
1108 * @rcu_head: the name of the struct rcu_head within the type of @ptr.
1109 *
1110 * Many rcu callback functions just call kfree() on the base structure.
1111 * These functions are trivial, but their size adds up, and furthermore
1112 * when they are used in a kernel module, that module must invoke the
1113 * high-latency rcu_barrier() function at module-unload time.
1114 *
1115 * The kfree_rcu() function handles this issue. Rather than encoding a
1116 * function address in the embedded rcu_head structure, kfree_rcu() instead
1117 * encodes the offset of the rcu_head structure within the base structure.
1118 * Because the functions are not allowed in the low-order 4096 bytes of
1119 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
1120 * If the offset is larger than 4095 bytes, a compile-time error will
1121 * be generated in __kfree_rcu(). If this error is triggered, you can
1122 * either fall back to use of call_rcu() or rearrange the structure to
1123 * position the rcu_head structure into the first 4096 bytes.
1124 *
1125 * Note that the allowable offset might decrease in the future, for example,
1126 * to allow something like kmem_cache_free_rcu().
d8169d4c
JE
1127 *
1128 * The BUILD_BUG_ON check must not involve any function calls, hence the
1129 * checks are done in macros here.
9ab1544e
LJ
1130 */
1131#define kfree_rcu(ptr, rcu_head) \
1132 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
1133
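/*
 * Illustrative sketch (not part of this header): kfree_rcu() replaces
 * the call_rcu()-plus-trivial-callback pattern.  "struct foo", the
 * "rcu" field name, and old_fp are hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 * Instead of registering a callback whose only job is to kfree() the
 * enclosing structure:
 *
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 *
 * one can simply write:
 *
 *	kfree_rcu(old_fp, rcu);
 */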
ffa83fb5
PM
1134#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
1135static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1136{
1137 *delta_jiffies = ULONG_MAX;
1138 return 0;
1139}
1140#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
1141
2f33b512
PM
1142#if defined(CONFIG_RCU_NOCB_CPU_ALL)
1143static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
1144#elif defined(CONFIG_RCU_NOCB_CPU)
584dc4ce 1145bool rcu_is_nocb_cpu(int cpu);
d1e43fa5
FW
1146#else
1147static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
2f33b512 1148#endif
d1e43fa5
FW
1149
1150
0edd1b17
PM
1151/* Only for use by adaptive-ticks code. */
1152#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
584dc4ce
TB
1153bool rcu_sys_is_idle(void);
1154void rcu_sysidle_force_exit(void);
0edd1b17
PM
1155#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
1156
1157static inline bool rcu_sys_is_idle(void)
1158{
1159 return false;
1160}
1161
1162static inline void rcu_sysidle_force_exit(void)
1163{
1164}
1165
1166#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
1167
1168
1da177e4 1169#endif /* __LINUX_RCUPDATE_H */