/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when
 * you start shutting down you drop the initial refcount by calling
 * percpu_ref_kill() rather than percpu_ref_put().
 *
 * The refcount will have a range of 0 to ((1UL << (BITS_PER_LONG - 1)) - 1),
 * i.e. one bit less than an atomic_long_t - this is because of the way
 * shutdown works, see percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_long_t mode, collecting the per cpu refs
 * and issuing the appropriate barriers, and then marks the ref as shutting
 * down so that percpu_ref_put() will check for the ref hitting 0. Once the
 * percpu counters have been gathered, the initial ref is dropped as well.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()), and the kioctx is freed
 * once the remaining references are dropped.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must be called precisely once, so the code needs
 * some mechanism of its own to synchronize teardown; see the lifecycle sketch
 * below.
 */
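
/*
 * A minimal lifecycle sketch.  struct foo and the foo_*() functions are
 * hypothetical, not part of this API; note the release function may be
 * invoked from atomic context and so must not sleep:
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(ref);
 *		kfree(foo);
 *	}
 *
 *	static int foo_create(struct foo *foo)
 *	{
 *		return percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL);
 *	}
 *
 * While the object is live, users take and drop temporary references with
 * percpu_ref_get(&foo->ref) and percpu_ref_put(&foo->ref).  Shutdown is a
 * single call to percpu_ref_kill(&foo->ref); foo_release() then runs once
 * the last remaining reference is put.
 */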

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, the ref has left percpu mode and get/put manipulate
	 * the atomic count directly.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_kill;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non-percpu mode, then does a call_rcu() before gathering up
 * the percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
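
/*
 * Sketch of a two stage shutdown in the style described at the top of this
 * file (kill_foo() and the lookup structure are hypothetical; the caller
 * must guarantee kill_foo() runs at most once per ref):
 *
 *	static void kill_foo(struct foo *foo)
 *	{
 *		percpu_ref_kill(&foo->ref);
 *
 *		... unpublish foo so that no new lookups can find it ...
 *	}
 *
 * Existing holders keep foo alive through their references; foo_release()
 * runs only after the last percpu_ref_put().
 */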

#define __PERCPU_REF_DEAD	1

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer for the caller to test against NULL
 * because doing so would force the compiler to generate two conditional
 * branches, as it can't assume that @ref->percpu_count_ptr is not NULL.
 */
static inline bool __percpu_ref_alive(struct percpu_ref *ref,
				      unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);

	/* paired with smp_store_release() in percpu_ref_reinit() */
	smp_read_barrier_depends();

	if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__percpu_ref_alive(ref, &percpu_count))
		this_cpu_inc(*percpu_count);
	else
		atomic_long_inc(&ref->count);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	int ret;

	rcu_read_lock_sched();

	if (__percpu_ref_alive(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
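
/*
 * Typical lookup-and-tryget pattern (foo_idr and foo_lookup() are
 * hypothetical; this assumes foos are freed only after an RCU grace
 * period, so the object can't go away between the lookup and the tryget):
 *
 *	static struct foo *foo_lookup(int id)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = idr_find(&foo_idr, id);
 *		if (foo && !percpu_ref_tryget(&foo->ref))
 *			foo = NULL;
 *		rcu_read_unlock();
 *
 *		return foo;
 *	}
 *
 * On success the caller owns a reference and must drop it with
 * percpu_ref_put() when done.
 */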

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live(); see the sketch below.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	int ret = false;

	rcu_read_lock_sched();

	if (__percpu_ref_alive(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	}

	rcu_read_unlock_sched();

	return ret;
}
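
/*
 * Sketch of using percpu_ref_kill_and_confirm() to get a hard cutoff for
 * percpu_ref_tryget_live() (foo_confirm_kill() and the confirm_done
 * completion are hypothetical):
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *
 * After wait_for_completion() returns, percpu_ref_tryget_live() is
 * guaranteed to fail for this ref.
 */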

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__percpu_ref_alive(ref, &percpu_count))
		this_cpu_dec(*percpu_count);
	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__percpu_ref_alive(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}
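
/*
 * Sketch of recycling a killed ref with percpu_ref_reinit() instead of
 * freeing it (foo_recycle() is hypothetical; it assumes @foo has been
 * killed, every reference has been dropped, and the release function for
 * such a ref does not free the object):
 *
 *	static void foo_recycle(struct foo *foo)
 *	{
 *		WARN_ON_ONCE(!percpu_ref_is_zero(&foo->ref));
 *		percpu_ref_reinit(&foo->ref);
 *	}
 */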

#endif