/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when
 * you start shutting down you call percpu_ref_kill(), which drops that
 * initial refcount for you (see percpu_ref_kill() below).
 *
 * The refcount will have a range of 0 to ((1UL << (BITS_PER_LONG - 1)) - 1),
 * i.e. one bit less than an atomic_long_t - this is because of the way
 * shutdown works, see percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_long_t mode, collecting the per-cpu refs
 * and issuing the appropriate barriers, marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0, and then drops the
 * initial ref itself.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()), and the kioctx is freed
 * by the release function once all remaining references have been put.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once.  percpu_ref_kill() returns void and must be called exactly once, so
 * if teardown can be initiated from more than one place, the caller has to
 * provide that synchronization itself.
 */
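
/*
 * Illustrative lifecycle sketch (not part of this header; struct foo,
 * foo_create() and foo_release() are hypothetical names):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);	// no references remain at this point
 *	}
 *
 *	static struct foo *foo_create(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (!foo)
 *			return NULL;
 *		// the ref starts out holding one (the "initial") reference
 *		if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
 *			kfree(foo);
 *			return NULL;
 *		}
 *		return foo;
 *	}
 *
 * Users then take and drop references with percpu_ref_get()/percpu_ref_put();
 * teardown calls percpu_ref_kill() exactly once, which also drops the initial
 * reference, and foo_release() runs once all other references have been put.
 */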

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
};
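
/*
 * Sketch of the init flags in use (illustrative; my_release is a
 * hypothetical release callback, return value checks elided):
 *
 *	// stay in atomic mode until explicitly switched to percpu mode
 *	percpu_ref_init(&ref, my_release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	...
 *	percpu_ref_switch_to_percpu(&ref);
 *
 *	// start dead (count == 0); must be revived before use
 *	percpu_ref_init(&ref, my_release, PERCPU_REF_INIT_DEAD, GFP_KERNEL);
 *	...
 *	percpu_ref_reinit(&ref);
 */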

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bits of percpu_count_ptr hold the __PERCPU_REF_ATOMIC and
	 * __PERCPU_REF_DEAD flags; if either is set, get/put manipulate the
	 * atomic counter instead of the percpu counters.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);
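
/*
 * Mode-switching sketch (illustrative; ref_switched() is a hypothetical
 * confirmation callback).  The switch to atomic mode is asynchronous; it is
 * only complete once @confirm_switch has been invoked:
 *
 *	static void ref_switched(struct percpu_ref *ref)
 *	{
 *		// all CPUs now see the ref in atomic mode
 *	}
 *
 *	percpu_ref_switch_to_atomic(&ref, ref_switched);
 */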

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non-percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and leave the NULL test to the
 * caller because doing so would force the compiler to generate two
 * conditional branches, as it can't assume that @ref->percpu_count is
 * not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	/* paired with smp_store_release() in percpu_ref_reinit() */
	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock_sched();
}
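
/*
 * Batching sketch (illustrative; @ref and @n are assumed to be in scope):
 * take @n references in one operation instead of @n separate gets, e.g.
 * when handing one reference to each of @n new users:
 *
 *	percpu_ref_get_many(ref, n);
 *	...
 *	// each user eventually drops its reference with percpu_ref_put(ref)
 */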

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
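
/*
 * Typical tryget pattern in a lookup path (illustrative; foo_lookup() and
 * struct foo are hypothetical, and something else - e.g. RCU - must keep
 * *foo allocated across the tryget):
 *
 *	struct foo *foo = foo_lookup(key);
 *
 *	if (foo && !percpu_ref_tryget(&foo->ref))
 *		foo = NULL;	// count already hit zero; don't use it
 */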

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
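
/*
 * Sketch of pairing percpu_ref_kill_and_confirm() with tryget_live
 * (illustrative; ref_killed() is a hypothetical confirmation callback):
 *
 *	static void ref_killed(struct percpu_ref *ref)
 *	{
 *		// from here on, percpu_ref_tryget_live() is guaranteed to
 *		// fail; no new references will be handed out
 *	}
 *
 *	percpu_ref_kill_and_confirm(&ref, ref_killed);
 */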

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}
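
/*
 * Illustrative teardown sanity check: once the ref has been killed and all
 * references dropped, the count should read zero before the ref is torn down:
 *
 *	WARN_ON(!percpu_ref_is_zero(&ref));
 *	percpu_ref_exit(&ref);
 */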

#endif