/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct srcu_struct_array {
	int c[2];
};

struct srcu_struct {
	int completed;
	struct srcu_struct_array __percpu *per_cpu_ref;
	struct mutex mutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

#ifndef CONFIG_PREEMPT
#define srcu_barrier() barrier()
#else /* #ifndef CONFIG_PREEMPT */
#define srcu_barrier()
#endif /* #else #ifndef CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key);

#define init_srcu_struct(sp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((sp), #sp, &__srcu_key); \
})

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *sp);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

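/*
 * Usage sketch (not part of this header, names are hypothetical): a
 * subsystem that protects data with SRCU typically declares its own
 * srcu_struct and initializes it once during setup, for example:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 * The matching cleanup_srcu_struct() call must only run once no readers
 * or pending grace periods remain for that srcu_struct.
 */
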
void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
long srcu_batches_completed(struct srcu_struct *sp);

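/*
 * Usage sketch (not part of this header, names are hypothetical): a
 * typical SRCU updater publishes a new version of a structure and then
 * waits for all pre-existing readers before freeing the old one:
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static struct my_data __rcu *my_ptr;
 *
 *	void my_update(struct my_data *new_p)
 *	{
 *		struct my_data *old_p;
 *
 *		mutex_lock(&my_lock);
 *		old_p = rcu_dereference_protected(my_ptr,
 *						  lockdep_is_held(&my_lock));
 *		rcu_assign_pointer(my_ptr, new_p);
 *		mutex_unlock(&my_lock);
 *		synchronize_srcu(&my_srcu);
 *		kfree(old_p);
 *	}
 *
 * synchronize_srcu_expedited() has the same semantics but trades CPU
 * time for a shorter wait; srcu_batches_completed() is mainly useful
 * for diagnostics and torture testing.
 */
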
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view
 * (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()), then srcu_read_lock_held() returns false even if
 * the CPU did an srcu_read_lock().  The reason for this is that RCU
 * ignores CPUs that are in such a section, treating them as being in an
 * extended quiescent state, so such a CPU is effectively never in an
 * RCU read-side critical section regardless of what RCU primitives it
 * invokes.  This state of affairs is required because we need to keep
 * an RCU-free window in idle where the CPU may possibly enter into low
 * power mode.  This way, CPUs that have started a grace period can
 * notice the extended quiescent state; otherwise, the idle task would
 * delay every grace period for as long as it runs.
 */
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	if (rcu_is_cpu_idle())
		return 0;

	if (!debug_locks)
		return 1;

	return lock_is_held(&sp->dep_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

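/*
 * Usage sketch (not part of this header, names are hypothetical):
 * srcu_read_lock_held() is mostly useful in lockdep-style assertions,
 * for example to document that a helper must only be called from
 * within an SRCU read-side critical section:
 *
 *	static void my_reader_helper(struct my_data *p)
 *	{
 *		WARN_ON_ONCE(!srcu_read_lock_held(&my_srcu));
 *		my_consume(p);
 *	}
 *
 * With CONFIG_DEBUG_LOCK_ALLOC=n the check unconditionally returns 1,
 * so such assertions cannot produce false positives in production
 * builds.
 */
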
/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an SRCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, sp, c) \
	__rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an SRCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)

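/*
 * Usage sketch (not part of this header, names are hypothetical): code
 * that may run either inside srcu_read_lock() or with the update-side
 * lock held can fetch the protected pointer without lockdep splats via:
 *
 *	p = srcu_dereference_check(my_ptr, &my_srcu,
 *				   lockdep_is_held(&my_lock));
 *
 * Readers that are always within srcu_read_lock()/srcu_read_unlock()
 * can use the simpler srcu_dereference(my_ptr, &my_srcu).
 */
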
/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @sp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 */
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
	int retval = __srcu_read_lock(sp);

	rcu_lock_acquire(&(sp)->dep_map);
	return retval;
}

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @sp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
	__releases(sp)
{
	rcu_lock_release(&(sp)->dep_map);
	__srcu_read_unlock(sp, idx);
}

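/*
 * Usage sketch (not part of this header, names are hypothetical): a
 * complete SRCU read-side critical section pairs srcu_read_lock() with
 * srcu_read_unlock() and accesses the protected pointer through
 * srcu_dereference():
 *
 *	int idx;
 *	struct my_data *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	if (p)
 *		my_consume(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * Unlike rcu_read_lock(), srcu_read_lock() returns an index that must
 * be passed to the matching srcu_read_unlock().
 */
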
#endif