// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
 * percpu counters will all sum to the correct value
 *
 * (More precisely: because modular arithmetic is commutative the sum of all the
 * percpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
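
/*
 * A minimal lifecycle sketch of the usage described above (illustrative
 * only; "struct foo", "foo" and "foo_release" are hypothetical and not
 * part of this file):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *		...
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *
 *	percpu_ref_get(&foo->ref);	// cheap percpu fast path
 *	percpu_ref_put(&foo->ref);
 *
 *	percpu_ref_kill(&foo->ref);	// drops the initial ref;
 *					// foo_release() runs once the
 *					// count hits 0
 */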

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		ref->percpu_count_ptr = 0;
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
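
/*
 * Example of checking the return value at init time (a sketch; "foo" and
 * "foo_release" are hypothetical):
 *
 *	int err;
 *
 *	err = percpu_ref_init(&foo->ref, foo_release,
 *			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
 *	if (err)
 *		return err;	// -ENOMEM if either allocation failed
 *
 * PERCPU_REF_ALLOW_REINIT keeps the percpu counters around after a kill
 * so that the ref can later be brought back with percpu_ref_reinit() or
 * percpu_ref_resurrect().
 */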

static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or in init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	static atomic_t underflows;
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
		      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		      data->release, atomic_long_read(&data->count)) &&
	    atomic_inc_return(&underflows) < 4) {
		pr_err("%s(): percpu_ref underflow", __func__);
		mem_dump_obj(data);
	}

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || percpu_ref_is_dying(ref))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(percpu_ref_is_dying(ref),
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
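
/*
 * Sketch of a confirmed kill that waits until @ref is seen as dead on
 * all CPUs (the completion and callback are hypothetical):
 *
 *	static DECLARE_COMPLETION(foo_confirmed);
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		complete(&foo_confirmed);	// must not block
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo_confirmed);
 *	// percpu_ref_tryget_live(&foo->ref) now fails on every CPU
 */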

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!percpu_ref_is_dying(ref));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);