1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Tim Schumacher <timschumi@gmx.de>
3 Date: Mon, 1 Oct 2018 19:42:05 +0200
4 Subject: [PATCH] Prefix all refcount functions with zfs_
5
6 Recent changes in the Linux kernel made it necessary to prefix
7 the refcount_add() function with zfs_ due to a name collision.
8
9 To bring the other functions in line with that and to avoid future
10 collisions, prefix the other refcount functions as well.
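
For illustration, a minimal sketch of the collision (the kernel
prototype is an assumption, as declared in the kernel's
<linux/refcount.h>; the ZFS prototypes are taken verbatim from the
include/sys/refcount.h hunk below):

    /* Linux kernel refcount API (assumed declaration): */
    void refcount_add(int i, refcount_t *r);
    /* ZFS, already prefixed by the earlier compat change: */
    int64_t zfs_refcount_add(zfs_refcount_t *, void *);
    /* ZFS, previously unprefixed; renamed by this patch: */
    int64_t refcount_remove(zfs_refcount_t *, void *);

With both headers visible in one translation unit, an unprefixed
ZFS name conflicts with the kernel's incompatible declaration,
hence the blanket zfs_ prefix.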
11
12 Reviewed by: Matthew Ahrens <mahrens@delphix.com>
13 Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
14 Signed-off-by: Tim Schumacher <timschumi@gmx.de>
15 Closes #7963
16 ---
17 cmd/ztest/ztest.c | 10 +-
18 include/sys/refcount.h | 70 ++++++-----
19 include/sys/trace_dbuf.h | 2 +-
20 module/zfs/abd.c | 22 ++--
21 module/zfs/arc.c | 301 ++++++++++++++++++++++++-----------------------
22 module/zfs/dbuf.c | 66 +++++------
23 module/zfs/dbuf_stats.c | 4 +-
24 module/zfs/dmu_tx.c | 36 +++---
25 module/zfs/dnode.c | 40 +++----
26 module/zfs/dnode_sync.c | 6 +-
27 module/zfs/dsl_dataset.c | 12 +-
28 module/zfs/dsl_destroy.c | 6 +-
29 module/zfs/metaslab.c | 23 ++--
30 module/zfs/refcount.c | 42 +++----
31 module/zfs/rrwlock.c | 35 +++---
32 module/zfs/sa.c | 8 +-
33 module/zfs/spa.c | 8 +-
34 module/zfs/spa_misc.c | 35 +++---
35 module/zfs/zfs_ctldir.c | 6 +-
36 module/zfs/zfs_znode.c | 10 +-
37 module/zfs/zio.c | 4 +-
38 21 files changed, 381 insertions(+), 365 deletions(-)
39
40 diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
41 index 24967a76..5868d60a 100644
42 --- a/cmd/ztest/ztest.c
43 +++ b/cmd/ztest/ztest.c
44 @@ -1205,7 +1205,7 @@ ztest_znode_init(uint64_t object)
45 ztest_znode_t *zp = umem_alloc(sizeof (*zp), UMEM_NOFAIL);
46
47 list_link_init(&zp->z_lnode);
48 - refcount_create(&zp->z_refcnt);
49 + zfs_refcount_create(&zp->z_refcnt);
50 zp->z_object = object;
51 zfs_rlock_init(&zp->z_range_lock);
52
53 @@ -1215,10 +1215,10 @@ ztest_znode_init(uint64_t object)
54 static void
55 ztest_znode_fini(ztest_znode_t *zp)
56 {
57 - ASSERT(refcount_is_zero(&zp->z_refcnt));
58 + ASSERT(zfs_refcount_is_zero(&zp->z_refcnt));
59 zfs_rlock_destroy(&zp->z_range_lock);
60 zp->z_object = 0;
61 - refcount_destroy(&zp->z_refcnt);
62 + zfs_refcount_destroy(&zp->z_refcnt);
63 list_link_init(&zp->z_lnode);
64 umem_free(zp, sizeof (*zp));
65 }
66 @@ -1268,8 +1268,8 @@ ztest_znode_put(ztest_ds_t *zd, ztest_znode_t *zp)
67 ASSERT3U(zp->z_object, !=, 0);
68 zll = &zd->zd_range_lock[zp->z_object & (ZTEST_OBJECT_LOCKS - 1)];
69 mutex_enter(&zll->z_lock);
70 - refcount_remove(&zp->z_refcnt, RL_TAG);
71 - if (refcount_is_zero(&zp->z_refcnt)) {
72 + zfs_refcount_remove(&zp->z_refcnt, RL_TAG);
73 + if (zfs_refcount_is_zero(&zp->z_refcnt)) {
74 list_remove(&zll->z_list, zp);
75 ztest_znode_fini(zp);
76 }
77 diff --git a/include/sys/refcount.h b/include/sys/refcount.h
78 index 5c5198d8..7eeb1366 100644
79 --- a/include/sys/refcount.h
80 +++ b/include/sys/refcount.h
81 @@ -63,26 +63,24 @@ typedef struct refcount {
82 * refcount_create[_untracked]()
83 */
84
85 -void refcount_create(zfs_refcount_t *rc);
86 -void refcount_create_untracked(zfs_refcount_t *rc);
87 -void refcount_create_tracked(zfs_refcount_t *rc);
88 -void refcount_destroy(zfs_refcount_t *rc);
89 -void refcount_destroy_many(zfs_refcount_t *rc, uint64_t number);
90 -int refcount_is_zero(zfs_refcount_t *rc);
91 -int64_t refcount_count(zfs_refcount_t *rc);
92 -int64_t zfs_refcount_add(zfs_refcount_t *rc, void *holder_tag);
93 -int64_t refcount_remove(zfs_refcount_t *rc, void *holder_tag);
94 -int64_t refcount_add_many(zfs_refcount_t *rc, uint64_t number,
95 - void *holder_tag);
96 -int64_t refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
97 - void *holder_tag);
98 -void refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src);
99 -void refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
100 -boolean_t refcount_held(zfs_refcount_t *, void *);
101 -boolean_t refcount_not_held(zfs_refcount_t *, void *);
102 -
103 -void refcount_init(void);
104 -void refcount_fini(void);
105 +void zfs_refcount_create(zfs_refcount_t *);
106 +void zfs_refcount_create_untracked(zfs_refcount_t *);
107 +void zfs_refcount_create_tracked(zfs_refcount_t *);
108 +void zfs_refcount_destroy(zfs_refcount_t *);
109 +void zfs_refcount_destroy_many(zfs_refcount_t *, uint64_t);
110 +int zfs_refcount_is_zero(zfs_refcount_t *);
111 +int64_t zfs_refcount_count(zfs_refcount_t *);
112 +int64_t zfs_refcount_add(zfs_refcount_t *, void *);
113 +int64_t zfs_refcount_remove(zfs_refcount_t *, void *);
114 +int64_t zfs_refcount_add_many(zfs_refcount_t *, uint64_t, void *);
115 +int64_t zfs_refcount_remove_many(zfs_refcount_t *, uint64_t, void *);
116 +void zfs_refcount_transfer(zfs_refcount_t *, zfs_refcount_t *);
117 +void zfs_refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
118 +boolean_t zfs_refcount_held(zfs_refcount_t *, void *);
119 +boolean_t zfs_refcount_not_held(zfs_refcount_t *, void *);
120 +
121 +void zfs_refcount_init(void);
122 +void zfs_refcount_fini(void);
123
124 #else /* ZFS_DEBUG */
125
126 @@ -90,30 +88,30 @@ typedef struct refcount {
127 uint64_t rc_count;
128 } zfs_refcount_t;
129
130 -#define refcount_create(rc) ((rc)->rc_count = 0)
131 -#define refcount_create_untracked(rc) ((rc)->rc_count = 0)
132 -#define refcount_create_tracked(rc) ((rc)->rc_count = 0)
133 -#define refcount_destroy(rc) ((rc)->rc_count = 0)
134 -#define refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
135 -#define refcount_is_zero(rc) ((rc)->rc_count == 0)
136 -#define refcount_count(rc) ((rc)->rc_count)
137 +#define zfs_refcount_create(rc) ((rc)->rc_count = 0)
138 +#define zfs_refcount_create_untracked(rc) ((rc)->rc_count = 0)
139 +#define zfs_refcount_create_tracked(rc) ((rc)->rc_count = 0)
140 +#define zfs_refcount_destroy(rc) ((rc)->rc_count = 0)
141 +#define zfs_refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
142 +#define zfs_refcount_is_zero(rc) ((rc)->rc_count == 0)
143 +#define zfs_refcount_count(rc) ((rc)->rc_count)
144 #define zfs_refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
145 -#define refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
146 -#define refcount_add_many(rc, number, holder) \
147 +#define zfs_refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
148 +#define zfs_refcount_add_many(rc, number, holder) \
149 atomic_add_64_nv(&(rc)->rc_count, number)
150 -#define refcount_remove_many(rc, number, holder) \
151 +#define zfs_refcount_remove_many(rc, number, holder) \
152 atomic_add_64_nv(&(rc)->rc_count, -number)
153 -#define refcount_transfer(dst, src) { \
154 +#define zfs_refcount_transfer(dst, src) { \
155 uint64_t __tmp = (src)->rc_count; \
156 atomic_add_64(&(src)->rc_count, -__tmp); \
157 atomic_add_64(&(dst)->rc_count, __tmp); \
158 }
159 -#define refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
160 -#define refcount_held(rc, holder) ((rc)->rc_count > 0)
161 -#define refcount_not_held(rc, holder) (B_TRUE)
162 +#define zfs_refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
163 +#define zfs_refcount_held(rc, holder) ((rc)->rc_count > 0)
164 +#define zfs_refcount_not_held(rc, holder) (B_TRUE)
165
166 -#define refcount_init()
167 -#define refcount_fini()
168 +#define zfs_refcount_init()
169 +#define zfs_refcount_fini()
170
171 #endif /* ZFS_DEBUG */
172
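
A minimal usage sketch of the renamed interface, using only the
prototypes declared above (the function and its holder tag are
hypothetical; under ZFS_DEBUG the tag identifies the reference
owner for tracking, while the non-debug macros ignore it):

    static void
    zfs_refcount_example(void)
    {
            zfs_refcount_t rc;
            void *tag = &rc;        /* hypothetical holder tag */

            zfs_refcount_create(&rc);
            (void) zfs_refcount_add(&rc, tag);      /* count -> 1 */
            ASSERT(zfs_refcount_held(&rc, tag));
            (void) zfs_refcount_remove(&rc, tag);   /* count -> 0 */
            ASSERT(zfs_refcount_is_zero(&rc));
            zfs_refcount_destroy(&rc);
    }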
173 diff --git a/include/sys/trace_dbuf.h b/include/sys/trace_dbuf.h
174 index c3e70c37..e97b6113 100644
175 --- a/include/sys/trace_dbuf.h
176 +++ b/include/sys/trace_dbuf.h
177 @@ -71,7 +71,7 @@
178 __entry->db_offset = db->db.db_offset; \
179 __entry->db_size = db->db.db_size; \
180 __entry->db_state = db->db_state; \
181 - __entry->db_holds = refcount_count(&db->db_holds); \
182 + __entry->db_holds = zfs_refcount_count(&db->db_holds); \
183 snprintf(__get_str(msg), TRACE_DBUF_MSG_MAX, \
184 DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS); \
185 } else { \
186 diff --git a/module/zfs/abd.c b/module/zfs/abd.c
187 index 138b041c..5a6a8158 100644
188 --- a/module/zfs/abd.c
189 +++ b/module/zfs/abd.c
190 @@ -597,7 +597,7 @@ abd_alloc(size_t size, boolean_t is_metadata)
191 }
192 abd->abd_size = size;
193 abd->abd_parent = NULL;
194 - refcount_create(&abd->abd_children);
195 + zfs_refcount_create(&abd->abd_children);
196
197 abd->abd_u.abd_scatter.abd_offset = 0;
198
199 @@ -614,7 +614,7 @@ abd_free_scatter(abd_t *abd)
200 {
201 abd_free_pages(abd);
202
203 - refcount_destroy(&abd->abd_children);
204 + zfs_refcount_destroy(&abd->abd_children);
205 ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
206 ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
207 ABDSTAT_INCR(abdstat_scatter_chunk_waste,
208 @@ -641,7 +641,7 @@ abd_alloc_linear(size_t size, boolean_t is_metadata)
209 }
210 abd->abd_size = size;
211 abd->abd_parent = NULL;
212 - refcount_create(&abd->abd_children);
213 + zfs_refcount_create(&abd->abd_children);
214
215 if (is_metadata) {
216 abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
217 @@ -664,7 +664,7 @@ abd_free_linear(abd_t *abd)
218 zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
219 }
220
221 - refcount_destroy(&abd->abd_children);
222 + zfs_refcount_destroy(&abd->abd_children);
223 ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
224 ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
225
226 @@ -775,8 +775,8 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
227
228 abd->abd_size = size;
229 abd->abd_parent = sabd;
230 - refcount_create(&abd->abd_children);
231 - (void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
232 + zfs_refcount_create(&abd->abd_children);
233 + (void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
234
235 return (abd);
236 }
237 @@ -818,7 +818,7 @@ abd_get_from_buf(void *buf, size_t size)
238 abd->abd_flags = ABD_FLAG_LINEAR;
239 abd->abd_size = size;
240 abd->abd_parent = NULL;
241 - refcount_create(&abd->abd_children);
242 + zfs_refcount_create(&abd->abd_children);
243
244 abd->abd_u.abd_linear.abd_buf = buf;
245
246 @@ -836,11 +836,11 @@ abd_put(abd_t *abd)
247 ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
248
249 if (abd->abd_parent != NULL) {
250 - (void) refcount_remove_many(&abd->abd_parent->abd_children,
251 + (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
252 abd->abd_size, abd);
253 }
254
255 - refcount_destroy(&abd->abd_children);
256 + zfs_refcount_destroy(&abd->abd_children);
257 abd_free_struct(abd);
258 }
259
260 @@ -872,7 +872,7 @@ abd_borrow_buf(abd_t *abd, size_t n)
261 } else {
262 buf = zio_buf_alloc(n);
263 }
264 - (void) refcount_add_many(&abd->abd_children, n, buf);
265 + (void) zfs_refcount_add_many(&abd->abd_children, n, buf);
266
267 return (buf);
268 }
269 @@ -904,7 +904,7 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
270 ASSERT0(abd_cmp_buf(abd, buf, n));
271 zio_buf_free(buf, n);
272 }
273 - (void) refcount_remove_many(&abd->abd_children, n, buf);
274 + (void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
275 }
276
277 void
278 diff --git a/module/zfs/arc.c b/module/zfs/arc.c
279 index 7518d5c8..32ac0837 100644
280 --- a/module/zfs/arc.c
281 +++ b/module/zfs/arc.c
282 @@ -1181,7 +1181,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
283
284 bzero(hdr, HDR_FULL_SIZE);
285 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
286 - refcount_create(&hdr->b_l1hdr.b_refcnt);
287 + zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
288 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
289 list_link_init(&hdr->b_l1hdr.b_arc_node);
290 list_link_init(&hdr->b_l2hdr.b_l2node);
291 @@ -1228,7 +1228,7 @@ hdr_full_dest(void *vbuf, void *unused)
292
293 ASSERT(HDR_EMPTY(hdr));
294 cv_destroy(&hdr->b_l1hdr.b_cv);
295 - refcount_destroy(&hdr->b_l1hdr.b_refcnt);
296 + zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
297 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
298 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
299 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
300 @@ -1893,20 +1893,20 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
301 ASSERT0(hdr->b_l1hdr.b_bufcnt);
302 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
303 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
304 - (void) refcount_add_many(&state->arcs_esize[type],
305 + (void) zfs_refcount_add_many(&state->arcs_esize[type],
306 HDR_GET_LSIZE(hdr), hdr);
307 return;
308 }
309
310 ASSERT(!GHOST_STATE(state));
311 if (hdr->b_l1hdr.b_pabd != NULL) {
312 - (void) refcount_add_many(&state->arcs_esize[type],
313 + (void) zfs_refcount_add_many(&state->arcs_esize[type],
314 arc_hdr_size(hdr), hdr);
315 }
316 for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
317 if (arc_buf_is_shared(buf))
318 continue;
319 - (void) refcount_add_many(&state->arcs_esize[type],
320 + (void) zfs_refcount_add_many(&state->arcs_esize[type],
321 arc_buf_size(buf), buf);
322 }
323 }
324 @@ -1928,20 +1928,20 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
325 ASSERT0(hdr->b_l1hdr.b_bufcnt);
326 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
327 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
328 - (void) refcount_remove_many(&state->arcs_esize[type],
329 + (void) zfs_refcount_remove_many(&state->arcs_esize[type],
330 HDR_GET_LSIZE(hdr), hdr);
331 return;
332 }
333
334 ASSERT(!GHOST_STATE(state));
335 if (hdr->b_l1hdr.b_pabd != NULL) {
336 - (void) refcount_remove_many(&state->arcs_esize[type],
337 + (void) zfs_refcount_remove_many(&state->arcs_esize[type],
338 arc_hdr_size(hdr), hdr);
339 }
340 for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
341 if (arc_buf_is_shared(buf))
342 continue;
343 - (void) refcount_remove_many(&state->arcs_esize[type],
344 + (void) zfs_refcount_remove_many(&state->arcs_esize[type],
345 arc_buf_size(buf), buf);
346 }
347 }
348 @@ -1960,7 +1960,7 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
349 ASSERT(HDR_HAS_L1HDR(hdr));
350 if (!MUTEX_HELD(HDR_LOCK(hdr))) {
351 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
352 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
353 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
354 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
355 }
356
357 @@ -1998,7 +1998,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
358 * arc_l2c_only counts as a ghost state so we don't need to explicitly
359 * check to prevent usage of the arc_l2c_only list.
360 */
361 - if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
362 + if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
363 (state != arc_anon)) {
364 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
365 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
366 @@ -2043,7 +2043,7 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
367 abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
368 abi->abi_mfu_hits = l1hdr->b_mfu_hits;
369 abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
370 - abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
371 + abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
372 }
373
374 if (l2hdr) {
375 @@ -2079,7 +2079,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
376 */
377 if (HDR_HAS_L1HDR(hdr)) {
378 old_state = hdr->b_l1hdr.b_state;
379 - refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
380 + refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
381 bufcnt = hdr->b_l1hdr.b_bufcnt;
382 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
383 } else {
384 @@ -2148,7 +2148,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
385 * the reference. As a result, we use the arc
386 * header pointer for the reference.
387 */
388 - (void) refcount_add_many(&new_state->arcs_size,
389 + (void) zfs_refcount_add_many(&new_state->arcs_size,
390 HDR_GET_LSIZE(hdr), hdr);
391 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
392 } else {
393 @@ -2175,13 +2175,15 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
394 if (arc_buf_is_shared(buf))
395 continue;
396
397 - (void) refcount_add_many(&new_state->arcs_size,
398 + (void) zfs_refcount_add_many(
399 + &new_state->arcs_size,
400 arc_buf_size(buf), buf);
401 }
402 ASSERT3U(bufcnt, ==, buffers);
403
404 if (hdr->b_l1hdr.b_pabd != NULL) {
405 - (void) refcount_add_many(&new_state->arcs_size,
406 + (void) zfs_refcount_add_many(
407 + &new_state->arcs_size,
408 arc_hdr_size(hdr), hdr);
409 } else {
410 ASSERT(GHOST_STATE(old_state));
411 @@ -2203,7 +2205,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
412 * header on the ghost state.
413 */
414
415 - (void) refcount_remove_many(&old_state->arcs_size,
416 + (void) zfs_refcount_remove_many(&old_state->arcs_size,
417 HDR_GET_LSIZE(hdr), hdr);
418 } else {
419 arc_buf_t *buf;
420 @@ -2229,13 +2231,13 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
421 if (arc_buf_is_shared(buf))
422 continue;
423
424 - (void) refcount_remove_many(
425 + (void) zfs_refcount_remove_many(
426 &old_state->arcs_size, arc_buf_size(buf),
427 buf);
428 }
429 ASSERT3U(bufcnt, ==, buffers);
430 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
431 - (void) refcount_remove_many(
432 + (void) zfs_refcount_remove_many(
433 &old_state->arcs_size, arc_hdr_size(hdr), hdr);
434 }
435 }
436 @@ -2506,7 +2508,7 @@ arc_return_buf(arc_buf_t *buf, void *tag)
437 ASSERT3P(buf->b_data, !=, NULL);
438 ASSERT(HDR_HAS_L1HDR(hdr));
439 (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
440 - (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
441 + (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
442
443 arc_loaned_bytes_update(-arc_buf_size(buf));
444 }
445 @@ -2520,7 +2522,7 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
446 ASSERT3P(buf->b_data, !=, NULL);
447 ASSERT(HDR_HAS_L1HDR(hdr));
448 (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
449 - (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
450 + (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
451
452 arc_loaned_bytes_update(arc_buf_size(buf));
453 }
454 @@ -2547,13 +2549,13 @@ arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
455
456 /* protected by hash lock, if in the hash table */
457 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
458 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
459 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
460 ASSERT(state != arc_anon && state != arc_l2c_only);
461
462 - (void) refcount_remove_many(&state->arcs_esize[type],
463 + (void) zfs_refcount_remove_many(&state->arcs_esize[type],
464 size, hdr);
465 }
466 - (void) refcount_remove_many(&state->arcs_size, size, hdr);
467 + (void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
468 if (type == ARC_BUFC_METADATA) {
469 arc_space_return(size, ARC_SPACE_META);
470 } else {
471 @@ -2581,7 +2583,8 @@ arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
472 * refcount ownership to the hdr since it always owns
473 * the refcount whenever an arc_buf_t is shared.
474 */
475 - refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf, hdr);
476 + zfs_refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf,
477 + hdr);
478 hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
479 abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
480 HDR_ISTYPE_METADATA(hdr));
481 @@ -2609,7 +2612,8 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
482 * We are no longer sharing this buffer so we need
483 * to transfer its ownership to the rightful owner.
484 */
485 - refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr, buf);
486 + zfs_refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr,
487 + buf);
488 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
489 abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
490 abd_put(hdr->b_l1hdr.b_pabd);
491 @@ -2833,7 +2837,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
492 * it references and compressed arc enablement.
493 */
494 arc_hdr_alloc_pabd(hdr);
495 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
496 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
497
498 return (hdr);
499 }
500 @@ -2927,8 +2931,10 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
501 * the wrong pointer address when calling arc_hdr_destroy() later.
502 */
503
504 - (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
505 - (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
506 + (void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
507 + hdr);
508 + (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr),
509 + nhdr);
510
511 buf_discard_identity(hdr);
512 kmem_cache_free(old, hdr);
513 @@ -3008,7 +3014,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
514
515 vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
516
517 - (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
518 + (void) zfs_refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
519 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
520 }
521
522 @@ -3018,7 +3024,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
523 if (HDR_HAS_L1HDR(hdr)) {
524 ASSERT(hdr->b_l1hdr.b_buf == NULL ||
525 hdr->b_l1hdr.b_bufcnt > 0);
526 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
527 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
528 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
529 }
530 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
531 @@ -3171,7 +3177,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
532 return (bytes_evicted);
533 }
534
535 - ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
536 + ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
537 while (hdr->b_l1hdr.b_buf) {
538 arc_buf_t *buf = hdr->b_l1hdr.b_buf;
539 if (!mutex_tryenter(&buf->b_evict_lock)) {
540 @@ -3484,7 +3490,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
541 {
542 uint64_t evicted = 0;
543
544 - while (refcount_count(&state->arcs_esize[type]) != 0) {
545 + while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
546 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
547
548 if (!retry)
549 @@ -3507,7 +3513,7 @@ arc_prune_task(void *ptr)
550 if (func != NULL)
551 func(ap->p_adjust, ap->p_private);
552
553 - refcount_remove(&ap->p_refcnt, func);
554 + zfs_refcount_remove(&ap->p_refcnt, func);
555 }
556
557 /*
558 @@ -3530,14 +3536,14 @@ arc_prune_async(int64_t adjust)
559 for (ap = list_head(&arc_prune_list); ap != NULL;
560 ap = list_next(&arc_prune_list, ap)) {
561
562 - if (refcount_count(&ap->p_refcnt) >= 2)
563 + if (zfs_refcount_count(&ap->p_refcnt) >= 2)
564 continue;
565
566 zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
567 ap->p_adjust = adjust;
568 if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
569 ap, TQ_SLEEP) == TASKQID_INVALID) {
570 - refcount_remove(&ap->p_refcnt, ap->p_pfunc);
571 + zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
572 continue;
573 }
574 ARCSTAT_BUMP(arcstat_prune);
575 @@ -3559,8 +3565,9 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
576 {
577 int64_t delta;
578
579 - if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
580 - delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
581 + if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
582 + delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
583 + bytes);
584 return (arc_evict_state(state, spa, delta, type));
585 }
586
587 @@ -3603,8 +3610,9 @@ restart:
588 */
589 adjustmnt = arc_meta_used - arc_meta_limit;
590
591 - if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
592 - delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
593 + if (adjustmnt > 0 &&
594 + zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
595 + delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
596 adjustmnt);
597 total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
598 adjustmnt -= delta;
599 @@ -3620,8 +3628,9 @@ restart:
600 * simply decrement the amount of data evicted from the MRU.
601 */
602
603 - if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
604 - delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
605 + if (adjustmnt > 0 &&
606 + zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
607 + delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
608 adjustmnt);
609 total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
610 }
611 @@ -3629,17 +3638,17 @@ restart:
612 adjustmnt = arc_meta_used - arc_meta_limit;
613
614 if (adjustmnt > 0 &&
615 - refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
616 + zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
617 delta = MIN(adjustmnt,
618 - refcount_count(&arc_mru_ghost->arcs_esize[type]));
619 + zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
620 total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
621 adjustmnt -= delta;
622 }
623
624 if (adjustmnt > 0 &&
625 - refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
626 + zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
627 delta = MIN(adjustmnt,
628 - refcount_count(&arc_mfu_ghost->arcs_esize[type]));
629 + zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
630 total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
631 }
632
633 @@ -3688,8 +3697,8 @@ arc_adjust_meta_only(void)
634 * evict some from the MRU here, and some from the MFU below.
635 */
636 target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
637 - (int64_t)(refcount_count(&arc_anon->arcs_size) +
638 - refcount_count(&arc_mru->arcs_size) - arc_p));
639 + (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
640 + zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
641
642 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
643
644 @@ -3699,7 +3708,8 @@ arc_adjust_meta_only(void)
645 * space allotted to the MFU (which is defined as arc_c - arc_p).
646 */
647 target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
648 - (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
649 + (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) - (arc_c -
650 + arc_p)));
651
652 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
653
654 @@ -3817,8 +3827,8 @@ arc_adjust(void)
655 * arc_p here, and then evict more from the MFU below.
656 */
657 target = MIN((int64_t)(arc_size - arc_c),
658 - (int64_t)(refcount_count(&arc_anon->arcs_size) +
659 - refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
660 + (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
661 + zfs_refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
662
663 /*
664 * If we're below arc_meta_min, always prefer to evict data.
665 @@ -3902,8 +3912,8 @@ arc_adjust(void)
666 * cache. The following logic enforces these limits on the ghost
667 * caches, and evicts from them as needed.
668 */
669 - target = refcount_count(&arc_mru->arcs_size) +
670 - refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
671 + target = zfs_refcount_count(&arc_mru->arcs_size) +
672 + zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
673
674 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
675 total_evicted += bytes;
676 @@ -3921,8 +3931,8 @@ arc_adjust(void)
677 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
678 * mru ghost + mfu ghost <= arc_c
679 */
680 - target = refcount_count(&arc_mru_ghost->arcs_size) +
681 - refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
682 + target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
683 + zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
684
685 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
686 total_evicted += bytes;
687 @@ -4422,10 +4432,10 @@ static uint64_t
688 arc_evictable_memory(void)
689 {
690 uint64_t arc_clean =
691 - refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
692 - refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
693 - refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
694 - refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
695 + zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
696 + zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
697 + zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
698 + zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
699 uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0);
700
701 /*
702 @@ -4532,8 +4542,8 @@ arc_adapt(int bytes, arc_state_t *state)
703 {
704 int mult;
705 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
706 - int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
707 - int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
708 + int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
709 + int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
710
711 if (state == arc_l2c_only)
712 return;
713 @@ -4698,7 +4708,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
714 */
715 if (!GHOST_STATE(state)) {
716
717 - (void) refcount_add_many(&state->arcs_size, size, tag);
718 + (void) zfs_refcount_add_many(&state->arcs_size, size, tag);
719
720 /*
721 * If this is reached via arc_read, the link is
722 @@ -4710,8 +4720,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
723 * trying to [add|remove]_reference it.
724 */
725 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
726 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
727 - (void) refcount_add_many(&state->arcs_esize[type],
728 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
729 + (void) zfs_refcount_add_many(&state->arcs_esize[type],
730 size, tag);
731 }
732
733 @@ -4720,8 +4730,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
734 * data, and we have outgrown arc_p, update arc_p
735 */
736 if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
737 - (refcount_count(&arc_anon->arcs_size) +
738 - refcount_count(&arc_mru->arcs_size) > arc_p))
739 + (zfs_refcount_count(&arc_anon->arcs_size) +
740 + zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
741 arc_p = MIN(arc_c, arc_p + size);
742 }
743 }
744 @@ -4758,13 +4768,13 @@ arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
745
746 /* protected by hash lock, if in the hash table */
747 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
748 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
749 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
750 ASSERT(state != arc_anon && state != arc_l2c_only);
751
752 - (void) refcount_remove_many(&state->arcs_esize[type],
753 + (void) zfs_refcount_remove_many(&state->arcs_esize[type],
754 size, tag);
755 }
756 - (void) refcount_remove_many(&state->arcs_size, size, tag);
757 + (void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
758
759 VERIFY3U(hdr->b_type, ==, type);
760 if (type == ARC_BUFC_METADATA) {
761 @@ -4811,7 +4821,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
762 * another prefetch (to make it less likely to be evicted).
763 */
764 if (HDR_PREFETCH(hdr)) {
765 - if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
766 + if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
767 /* link protected by hash lock */
768 ASSERT(multilist_link_active(
769 &hdr->b_l1hdr.b_arc_node));
770 @@ -4852,7 +4862,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
771
772 if (HDR_PREFETCH(hdr)) {
773 new_state = arc_mru;
774 - if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
775 + if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
776 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
777 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
778 } else {
779 @@ -4876,7 +4886,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
780 * the head of the list now.
781 */
782 if ((HDR_PREFETCH(hdr)) != 0) {
783 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
784 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
785 /* link protected by hash_lock */
786 ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
787 }
788 @@ -4896,7 +4906,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
789 * This is a prefetch access...
790 * move this block back to the MRU state.
791 */
792 - ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
793 + ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
794 new_state = arc_mru;
795 }
796
797 @@ -5098,7 +5108,7 @@ arc_read_done(zio_t *zio)
798 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
799 }
800
801 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
802 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
803 callback_list != NULL);
804
805 if (no_zio_error) {
806 @@ -5109,7 +5119,7 @@ arc_read_done(zio_t *zio)
807 arc_change_state(arc_anon, hdr, hash_lock);
808 if (HDR_IN_HASH_TABLE(hdr))
809 buf_hash_remove(hdr);
810 - freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
811 + freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
812 }
813
814 /*
815 @@ -5129,7 +5139,7 @@ arc_read_done(zio_t *zio)
816 * in the cache).
817 */
818 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
819 - freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
820 + freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
821 }
822
823 /* execute each callback and free its structure */
824 @@ -5282,7 +5292,7 @@ top:
825 VERIFY0(arc_buf_alloc_impl(hdr, private,
826 compressed_read, B_TRUE, &buf));
827 } else if (*arc_flags & ARC_FLAG_PREFETCH &&
828 - refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
829 + zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
830 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
831 }
832 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
833 @@ -5348,7 +5358,7 @@ top:
834 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
835 ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
836 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
837 - ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
838 + ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
839 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
840 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
841
842 @@ -5546,7 +5556,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
843 p->p_pfunc = func;
844 p->p_private = private;
845 list_link_init(&p->p_node);
846 - refcount_create(&p->p_refcnt);
847 + zfs_refcount_create(&p->p_refcnt);
848
849 mutex_enter(&arc_prune_mtx);
850 zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
851 @@ -5562,15 +5572,15 @@ arc_remove_prune_callback(arc_prune_t *p)
852 boolean_t wait = B_FALSE;
853 mutex_enter(&arc_prune_mtx);
854 list_remove(&arc_prune_list, p);
855 - if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
856 + if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
857 wait = B_TRUE;
858 mutex_exit(&arc_prune_mtx);
859
860 /* wait for arc_prune_task to finish */
861 if (wait)
862 taskq_wait_outstanding(arc_prune_taskq, 0);
863 - ASSERT0(refcount_count(&p->p_refcnt));
864 - refcount_destroy(&p->p_refcnt);
865 + ASSERT0(zfs_refcount_count(&p->p_refcnt));
866 + zfs_refcount_destroy(&p->p_refcnt);
867 kmem_free(p, sizeof (*p));
868 }
869
870 @@ -5613,7 +5623,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
871 * this hdr, then we don't destroy the hdr.
872 */
873 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
874 - refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
875 + zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
876 arc_change_state(arc_anon, hdr, hash_lock);
877 arc_hdr_destroy(hdr);
878 mutex_exit(hash_lock);
879 @@ -5659,7 +5669,7 @@ arc_release(arc_buf_t *buf, void *tag)
880 ASSERT(HDR_EMPTY(hdr));
881
882 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
883 - ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
884 + ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
885 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
886
887 hdr->b_l1hdr.b_arc_access = 0;
888 @@ -5687,7 +5697,7 @@ arc_release(arc_buf_t *buf, void *tag)
889 ASSERT3P(state, !=, arc_anon);
890
891 /* this buffer is not on any list */
892 - ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
893 + ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
894
895 if (HDR_HAS_L2HDR(hdr)) {
896 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
897 @@ -5778,12 +5788,13 @@ arc_release(arc_buf_t *buf, void *tag)
898 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
899 ASSERT3P(state, !=, arc_l2c_only);
900
901 - (void) refcount_remove_many(&state->arcs_size,
902 + (void) zfs_refcount_remove_many(&state->arcs_size,
903 arc_buf_size(buf), buf);
904
905 - if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
906 + if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
907 ASSERT3P(state, !=, arc_l2c_only);
908 - (void) refcount_remove_many(&state->arcs_esize[type],
909 + (void) zfs_refcount_remove_many(
910 + &state->arcs_esize[type],
911 arc_buf_size(buf), buf);
912 }
913
914 @@ -5804,7 +5815,7 @@ arc_release(arc_buf_t *buf, void *tag)
915 nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
916 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
917 ASSERT0(nhdr->b_l1hdr.b_bufcnt);
918 - ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
919 + ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
920 VERIFY3U(nhdr->b_type, ==, type);
921 ASSERT(!HDR_SHARED_DATA(nhdr));
922
923 @@ -5819,11 +5830,11 @@ arc_release(arc_buf_t *buf, void *tag)
924 buf->b_hdr = nhdr;
925
926 mutex_exit(&buf->b_evict_lock);
927 - (void) refcount_add_many(&arc_anon->arcs_size,
928 + (void) zfs_refcount_add_many(&arc_anon->arcs_size,
929 HDR_GET_LSIZE(nhdr), buf);
930 } else {
931 mutex_exit(&buf->b_evict_lock);
932 - ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
933 + ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
934 /* protected by hash lock, or hdr is on arc_anon */
935 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
936 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
937 @@ -5860,7 +5871,7 @@ arc_referenced(arc_buf_t *buf)
938 int referenced;
939
940 mutex_enter(&buf->b_evict_lock);
941 - referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
942 + referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
943 mutex_exit(&buf->b_evict_lock);
944 return (referenced);
945 }
946 @@ -5877,7 +5888,7 @@ arc_write_ready(zio_t *zio)
947 fstrans_cookie_t cookie = spl_fstrans_mark();
948
949 ASSERT(HDR_HAS_L1HDR(hdr));
950 - ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
951 + ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
952 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
953
954 /*
955 @@ -6029,7 +6040,7 @@ arc_write_done(zio_t *zio)
956 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
957 panic("bad overwrite, hdr=%p exists=%p",
958 (void *)hdr, (void *)exists);
959 - ASSERT(refcount_is_zero(
960 + ASSERT(zfs_refcount_is_zero(
961 &exists->b_l1hdr.b_refcnt));
962 arc_change_state(arc_anon, exists, hash_lock);
963 mutex_exit(hash_lock);
964 @@ -6059,7 +6070,7 @@ arc_write_done(zio_t *zio)
965 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
966 }
967
968 - ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
969 + ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
970 callback->awcb_done(zio, buf, callback->awcb_private);
971
972 abd_put(zio->io_abd);
973 @@ -6222,7 +6233,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
974 /* assert that it has not wrapped around */
975 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
976
977 - anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
978 + anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
979 arc_loaned_bytes), 0);
980
981 /*
982 @@ -6245,9 +6256,10 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
983 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
984 anon_size > arc_c / 4) {
985 uint64_t meta_esize =
986 - refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
987 + zfs_refcount_count(
988 + &arc_anon->arcs_esize[ARC_BUFC_METADATA]);
989 uint64_t data_esize =
990 - refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
991 + zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
992 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
993 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
994 arc_tempreserve >> 10, meta_esize >> 10,
995 @@ -6263,11 +6275,11 @@ static void
996 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
997 kstat_named_t *evict_data, kstat_named_t *evict_metadata)
998 {
999 - size->value.ui64 = refcount_count(&state->arcs_size);
1000 + size->value.ui64 = zfs_refcount_count(&state->arcs_size);
1001 evict_data->value.ui64 =
1002 - refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
1003 + zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
1004 evict_metadata->value.ui64 =
1005 - refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
1006 + zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
1007 }
1008
1009 static int
1010 @@ -6484,25 +6496,25 @@ arc_state_init(void)
1011 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
1012 arc_state_multilist_index_func);
1013
1014 - refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
1015 - refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
1016 - refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
1017 - refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
1018 - refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
1019 - refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
1020 - refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
1021 - refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
1022 - refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
1023 - refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
1024 - refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
1025 - refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
1026 -
1027 - refcount_create(&arc_anon->arcs_size);
1028 - refcount_create(&arc_mru->arcs_size);
1029 - refcount_create(&arc_mru_ghost->arcs_size);
1030 - refcount_create(&arc_mfu->arcs_size);
1031 - refcount_create(&arc_mfu_ghost->arcs_size);
1032 - refcount_create(&arc_l2c_only->arcs_size);
1033 + zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
1034 + zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
1035 + zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
1036 + zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
1037 + zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
1038 + zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
1039 + zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
1040 + zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
1041 + zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
1042 + zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
1043 + zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
1044 + zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
1045 +
1046 + zfs_refcount_create(&arc_anon->arcs_size);
1047 + zfs_refcount_create(&arc_mru->arcs_size);
1048 + zfs_refcount_create(&arc_mru_ghost->arcs_size);
1049 + zfs_refcount_create(&arc_mfu->arcs_size);
1050 + zfs_refcount_create(&arc_mfu_ghost->arcs_size);
1051 + zfs_refcount_create(&arc_l2c_only->arcs_size);
1052
1053 arc_anon->arcs_state = ARC_STATE_ANON;
1054 arc_mru->arcs_state = ARC_STATE_MRU;
1055 @@ -6515,25 +6527,25 @@ arc_state_init(void)
1056 static void
1057 arc_state_fini(void)
1058 {
1059 - refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
1060 - refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
1061 - refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
1062 - refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
1063 - refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
1064 - refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
1065 - refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
1066 - refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
1067 - refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
1068 - refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
1069 - refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
1070 - refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
1071 -
1072 - refcount_destroy(&arc_anon->arcs_size);
1073 - refcount_destroy(&arc_mru->arcs_size);
1074 - refcount_destroy(&arc_mru_ghost->arcs_size);
1075 - refcount_destroy(&arc_mfu->arcs_size);
1076 - refcount_destroy(&arc_mfu_ghost->arcs_size);
1077 - refcount_destroy(&arc_l2c_only->arcs_size);
1078 + zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
1079 + zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
1080 + zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
1081 + zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
1082 + zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
1083 + zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
1084 + zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
1085 + zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
1086 + zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
1087 + zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
1088 + zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
1089 + zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
1090 +
1091 + zfs_refcount_destroy(&arc_anon->arcs_size);
1092 + zfs_refcount_destroy(&arc_mru->arcs_size);
1093 + zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
1094 + zfs_refcount_destroy(&arc_mfu->arcs_size);
1095 + zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
1096 + zfs_refcount_destroy(&arc_l2c_only->arcs_size);
1097
1098 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
1099 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
1100 @@ -6704,8 +6716,8 @@ arc_fini(void)
1101 mutex_enter(&arc_prune_mtx);
1102 while ((p = list_head(&arc_prune_list)) != NULL) {
1103 list_remove(&arc_prune_list, p);
1104 - refcount_remove(&p->p_refcnt, &arc_prune_list);
1105 - refcount_destroy(&p->p_refcnt);
1106 + zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
1107 + zfs_refcount_destroy(&p->p_refcnt);
1108 kmem_free(p, sizeof (*p));
1109 }
1110 mutex_exit(&arc_prune_mtx);
1111 @@ -7108,7 +7120,7 @@ top:
1112 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
1113
1114 bytes_dropped += arc_hdr_size(hdr);
1115 - (void) refcount_remove_many(&dev->l2ad_alloc,
1116 + (void) zfs_refcount_remove_many(&dev->l2ad_alloc,
1117 arc_hdr_size(hdr), hdr);
1118 }
1119
1120 @@ -7527,7 +7539,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
1121 list_insert_head(&dev->l2ad_buflist, hdr);
1122 mutex_exit(&dev->l2ad_mtx);
1123
1124 - (void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);
1125 + (void) zfs_refcount_add_many(&dev->l2ad_alloc, psize,
1126 + hdr);
1127
1128 /*
1129 * Normally the L2ARC can use the hdr's data, but if
1130 @@ -7762,7 +7775,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
1131 offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
1132
1133 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
1134 - refcount_create(&adddev->l2ad_alloc);
1135 + zfs_refcount_create(&adddev->l2ad_alloc);
1136
1137 /*
1138 * Add device to global list
1139 @@ -7808,7 +7821,7 @@ l2arc_remove_vdev(vdev_t *vd)
1140 l2arc_evict(remdev, 0, B_TRUE);
1141 list_destroy(&remdev->l2ad_buflist);
1142 mutex_destroy(&remdev->l2ad_mtx);
1143 - refcount_destroy(&remdev->l2ad_alloc);
1144 + zfs_refcount_destroy(&remdev->l2ad_alloc);
1145 kmem_free(remdev, sizeof (l2arc_dev_t));
1146 }
1147
1148 diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
1149 index 5101c848..62b77bb0 100644
1150 --- a/module/zfs/dbuf.c
1151 +++ b/module/zfs/dbuf.c
1152 @@ -165,7 +165,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
1153 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
1154 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
1155 multilist_link_init(&db->db_cache_link);
1156 - refcount_create(&db->db_holds);
1157 + zfs_refcount_create(&db->db_holds);
1158 multilist_link_init(&db->db_cache_link);
1159
1160 return (0);
1161 @@ -179,7 +179,7 @@ dbuf_dest(void *vdb, void *unused)
1162 mutex_destroy(&db->db_mtx);
1163 cv_destroy(&db->db_changed);
1164 ASSERT(!multilist_link_active(&db->db_cache_link));
1165 - refcount_destroy(&db->db_holds);
1166 + zfs_refcount_destroy(&db->db_holds);
1167 }
1168
1169 /*
1170 @@ -317,7 +317,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
1171 * We mustn't hold db_mtx to maintain lock ordering:
1172 * DBUF_HASH_MUTEX > db_mtx.
1173 */
1174 - ASSERT(refcount_is_zero(&db->db_holds));
1175 + ASSERT(zfs_refcount_is_zero(&db->db_holds));
1176 ASSERT(db->db_state == DB_EVICTING);
1177 ASSERT(!MUTEX_HELD(&db->db_mtx));
1178
1179 @@ -354,7 +354,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
1180 ASSERT(db->db.db_data != NULL);
1181 ASSERT3U(db->db_state, ==, DB_CACHED);
1182
1183 - holds = refcount_count(&db->db_holds);
1184 + holds = zfs_refcount_count(&db->db_holds);
1185 if (verify_type == DBVU_EVICTING) {
1186 /*
1187 * Immediate eviction occurs when holds == dirtycnt.
1188 @@ -478,7 +478,7 @@ dbuf_cache_above_hiwater(void)
1189 uint64_t dbuf_cache_hiwater_bytes =
1190 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100;
1191
1192 - return (refcount_count(&dbuf_cache_size) >
1193 + return (zfs_refcount_count(&dbuf_cache_size) >
1194 dbuf_cache_target + dbuf_cache_hiwater_bytes);
1195 }
1196
1197 @@ -490,7 +490,7 @@ dbuf_cache_above_lowater(void)
1198 uint64_t dbuf_cache_lowater_bytes =
1199 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100;
1200
1201 - return (refcount_count(&dbuf_cache_size) >
1202 + return (zfs_refcount_count(&dbuf_cache_size) >
1203 dbuf_cache_target - dbuf_cache_lowater_bytes);
1204 }
1205
1206 @@ -524,7 +524,7 @@ dbuf_evict_one(void)
1207 if (db != NULL) {
1208 multilist_sublist_remove(mls, db);
1209 multilist_sublist_unlock(mls);
1210 - (void) refcount_remove_many(&dbuf_cache_size,
1211 + (void) zfs_refcount_remove_many(&dbuf_cache_size,
1212 db->db.db_size, db);
1213 dbuf_destroy(db);
1214 } else {
1215 @@ -611,7 +611,7 @@ dbuf_evict_notify(void)
1216 * because it's OK to occasionally make the wrong decision here,
1217 * and grabbing the lock results in massive lock contention.
1218 */
1219 - if (refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
1220 + if (zfs_refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
1221 if (dbuf_cache_above_hiwater())
1222 dbuf_evict_one();
1223 cv_signal(&dbuf_evict_cv);
1224 @@ -679,7 +679,7 @@ retry:
1225 dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
1226 offsetof(dmu_buf_impl_t, db_cache_link),
1227 dbuf_cache_multilist_index_func);
1228 - refcount_create(&dbuf_cache_size);
1229 + zfs_refcount_create(&dbuf_cache_size);
1230
1231 tsd_create(&zfs_dbuf_evict_key, NULL);
1232 dbuf_evict_thread_exit = B_FALSE;
1233 @@ -723,7 +723,7 @@ dbuf_fini(void)
1234 mutex_destroy(&dbuf_evict_lock);
1235 cv_destroy(&dbuf_evict_cv);
1236
1237 - refcount_destroy(&dbuf_cache_size);
1238 + zfs_refcount_destroy(&dbuf_cache_size);
1239 multilist_destroy(dbuf_cache);
1240 }
1241
1242 @@ -910,7 +910,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1243
1244 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1245 mutex_enter(&db->db_mtx);
1246 - if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
1247 + if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1248 int blksz = db->db.db_size;
1249 spa_t *spa = db->db_objset->os_spa;
1250
1251 @@ -983,7 +983,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
1252 /*
1253 * All reads are synchronous, so we must have a hold on the dbuf
1254 */
1255 - ASSERT(refcount_count(&db->db_holds) > 0);
1256 + ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1257 ASSERT(db->db_buf == NULL);
1258 ASSERT(db->db.db_data == NULL);
1259 if (db->db_level == 0 && db->db_freed_in_flight) {
1260 @@ -1017,7 +1017,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1261
1262 DB_DNODE_ENTER(db);
1263 dn = DB_DNODE(db);
1264 - ASSERT(!refcount_is_zero(&db->db_holds));
1265 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1266 /* We need the struct_rwlock to prevent db_blkptr from changing. */
1267 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1268 ASSERT(MUTEX_HELD(&db->db_mtx));
1269 @@ -1150,7 +1150,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1270 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1271 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1272 bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
1273 - } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1274 + } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1275 int size = arc_buf_size(db->db_buf);
1276 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1277 spa_t *spa = db->db_objset->os_spa;
1278 @@ -1182,7 +1182,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1279 * We don't have to hold the mutex to check db_state because it
1280 * can't be freed while we have a hold on the buffer.
1281 */
1282 - ASSERT(!refcount_is_zero(&db->db_holds));
1283 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1284
1285 if (db->db_state == DB_NOFILL)
1286 return (SET_ERROR(EIO));
1287 @@ -1277,7 +1277,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1288 static void
1289 dbuf_noread(dmu_buf_impl_t *db)
1290 {
1291 - ASSERT(!refcount_is_zero(&db->db_holds));
1292 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1293 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1294 mutex_enter(&db->db_mtx);
1295 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1296 @@ -1397,7 +1397,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1297 mutex_exit(&db->db_mtx);
1298 continue;
1299 }
1300 - if (refcount_count(&db->db_holds) == 0) {
1301 + if (zfs_refcount_count(&db->db_holds) == 0) {
1302 ASSERT(db->db_buf);
1303 dbuf_destroy(db);
1304 continue;
1305 @@ -1544,7 +1544,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1306 int txgoff = tx->tx_txg & TXG_MASK;
1307
1308 ASSERT(tx->tx_txg != 0);
1309 - ASSERT(!refcount_is_zero(&db->db_holds));
1310 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1311 DMU_TX_DIRTY_BUF(tx, db);
1312
1313 DB_DNODE_ENTER(db);
1314 @@ -1912,7 +1912,7 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1315 ASSERT(db->db_dirtycnt > 0);
1316 db->db_dirtycnt -= 1;
1317
1318 - if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1319 + if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1320 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
1321 dbuf_destroy(db);
1322 return (B_TRUE);
1323 @@ -1929,7 +1929,7 @@ dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
1324 dbuf_dirty_record_t *dr;
1325
1326 ASSERT(tx->tx_txg != 0);
1327 - ASSERT(!refcount_is_zero(&db->db_holds));
1328 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1329
1330 /*
1331 * Quick check for dirtyness. For already dirty blocks, this
1332 @@ -1981,7 +1981,7 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1333 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1334 ASSERT(tx->tx_txg != 0);
1335 ASSERT(db->db_level == 0);
1336 - ASSERT(!refcount_is_zero(&db->db_holds));
1337 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1338
1339 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1340 dmu_tx_private_ok(tx));
1341 @@ -2056,7 +2056,7 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
1342 void
1343 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1344 {
1345 - ASSERT(!refcount_is_zero(&db->db_holds));
1346 + ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1347 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1348 ASSERT(db->db_level == 0);
1349 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
1350 @@ -2075,7 +2075,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1351 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1352
1353 if (db->db_state == DB_CACHED &&
1354 - refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1355 + zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1356 mutex_exit(&db->db_mtx);
1357 (void) dbuf_dirty(db, tx);
1358 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1359 @@ -2120,7 +2120,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
1360 dmu_buf_impl_t *dndb;
1361
1362 ASSERT(MUTEX_HELD(&db->db_mtx));
1363 - ASSERT(refcount_is_zero(&db->db_holds));
1364 + ASSERT(zfs_refcount_is_zero(&db->db_holds));
1365
1366 if (db->db_buf != NULL) {
1367 arc_buf_destroy(db->db_buf, db);
1368 @@ -2140,7 +2140,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
1369
1370 if (multilist_link_active(&db->db_cache_link)) {
1371 multilist_remove(dbuf_cache, db);
1372 - (void) refcount_remove_many(&dbuf_cache_size,
1373 + (void) zfs_refcount_remove_many(&dbuf_cache_size,
1374 db->db.db_size, db);
1375 }
1376
1377 @@ -2186,7 +2186,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
1378 DB_DNODE_EXIT(db);
1379 }
1380
1381 - ASSERT(refcount_is_zero(&db->db_holds));
1382 + ASSERT(zfs_refcount_is_zero(&db->db_holds));
1383
1384 db->db_parent = NULL;
1385
1386 @@ -2383,7 +2383,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1387 dbuf_add_ref(parent, db);
1388
1389 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1390 - refcount_count(&dn->dn_holds) > 0);
1391 + zfs_refcount_count(&dn->dn_holds) > 0);
1392 (void) zfs_refcount_add(&dn->dn_holds, db);
1393 atomic_inc_32(&dn->dn_dbufs_count);
1394
1395 @@ -2744,9 +2744,9 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
1396 }
1397
1398 if (multilist_link_active(&dh->dh_db->db_cache_link)) {
1399 - ASSERT(refcount_is_zero(&dh->dh_db->db_holds));
1400 + ASSERT(zfs_refcount_is_zero(&dh->dh_db->db_holds));
1401 multilist_remove(dbuf_cache, dh->dh_db);
1402 - (void) refcount_remove_many(&dbuf_cache_size,
1403 + (void) zfs_refcount_remove_many(&dbuf_cache_size,
1404 dh->dh_db->db.db_size, dh->dh_db);
1405 }
1406 (void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
1407 @@ -2938,7 +2938,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
1408 * dnode so we can guarantee in dnode_move() that a referenced bonus
1409 * buffer has a corresponding dnode hold.
1410 */
1411 - holds = refcount_remove(&db->db_holds, tag);
1412 + holds = zfs_refcount_remove(&db->db_holds, tag);
1413 ASSERT(holds >= 0);
1414
1415 /*
1416 @@ -3017,7 +3017,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
1417 dbuf_destroy(db);
1418 } else if (!multilist_link_active(&db->db_cache_link)) {
1419 multilist_insert(dbuf_cache, db);
1420 - (void) refcount_add_many(&dbuf_cache_size,
1421 + (void) zfs_refcount_add_many(&dbuf_cache_size,
1422 db->db.db_size, db);
1423 mutex_exit(&db->db_mtx);
1424
1425 @@ -3037,7 +3037,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
1426 uint64_t
1427 dbuf_refcount(dmu_buf_impl_t *db)
1428 {
1429 - return (refcount_count(&db->db_holds));
1430 + return (zfs_refcount_count(&db->db_holds));
1431 }
1432
1433 void *
1434 @@ -3340,7 +3340,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
1435
1436 if (db->db_state != DB_NOFILL &&
1437 dn->dn_object != DMU_META_DNODE_OBJECT &&
1438 - refcount_count(&db->db_holds) > 1 &&
1439 + zfs_refcount_count(&db->db_holds) > 1 &&
1440 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
1441 *datap == db->db_buf) {
1442 /*
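
Note: throughout dbuf.c the rename is purely mechanical and the semantics are unchanged. In particular, zfs_refcount_remove() still returns the number of holds remaining, and dbuf_rele_and_unlock() keys its teardown off that return value. A minimal sketch of the pattern (in-tree style; locking and the bonus-buffer special case are omitted, and example_dbuf_rele is a hypothetical name):

    static void
    example_dbuf_rele(dmu_buf_impl_t *db, void *tag)
    {
            int64_t holds;

            /* Drop the hold that was registered under 'tag'. */
            holds = zfs_refcount_remove(&db->db_holds, tag);
            ASSERT3S(holds, >=, 0);

            if (holds == 0)
                    dbuf_destroy(db);       /* last hold is gone */
    }
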
1443 diff --git a/module/zfs/dbuf_stats.c b/module/zfs/dbuf_stats.c
1444 index 1712c9c1..7afc9ddc 100644
1445 --- a/module/zfs/dbuf_stats.c
1446 +++ b/module/zfs/dbuf_stats.c
1447 @@ -89,7 +89,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
1448 (u_longlong_t)db->db.db_size,
1449 !!dbuf_is_metadata(db),
1450 db->db_state,
1451 - (ulong_t)refcount_count(&db->db_holds),
1452 + (ulong_t)zfs_refcount_count(&db->db_holds),
1453 /* arc_buf_info_t */
1454 abi.abi_state_type,
1455 abi.abi_state_contents,
1456 @@ -113,7 +113,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
1457 (ulong_t)doi.doi_metadata_block_size,
1458 (u_longlong_t)doi.doi_bonus_size,
1459 (ulong_t)doi.doi_indirection,
1460 - (ulong_t)refcount_count(&dn->dn_holds),
1461 + (ulong_t)zfs_refcount_count(&dn->dn_holds),
1462 (u_longlong_t)doi.doi_fill_count,
1463 (u_longlong_t)doi.doi_max_offset);
1464
1465 diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
1466 index b1508ffa..135743e9 100644
1467 --- a/module/zfs/dmu_tx.c
1468 +++ b/module/zfs/dmu_tx.c
1469 @@ -132,8 +132,8 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
1470 txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
1471 txh->txh_tx = tx;
1472 txh->txh_dnode = dn;
1473 - refcount_create(&txh->txh_space_towrite);
1474 - refcount_create(&txh->txh_memory_tohold);
1475 + zfs_refcount_create(&txh->txh_space_towrite);
1476 + zfs_refcount_create(&txh->txh_memory_tohold);
1477 txh->txh_type = type;
1478 txh->txh_arg1 = arg1;
1479 txh->txh_arg2 = arg2;
1480 @@ -228,9 +228,9 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
1481 if (len == 0)
1482 return;
1483
1484 - (void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);
1485 + (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
1486
1487 - if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
1488 + if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
1489 err = SET_ERROR(EFBIG);
1490
1491 if (dn == NULL)
1492 @@ -295,7 +295,8 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
1493 static void
1494 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
1495 {
1496 - (void) refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE, FTAG);
1497 + (void) zfs_refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE,
1498 + FTAG);
1499 }
1500
1501 void
1502 @@ -418,7 +419,7 @@ dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
1503 return;
1504 }
1505
1506 - (void) refcount_add_many(&txh->txh_memory_tohold,
1507 + (void) zfs_refcount_add_many(&txh->txh_memory_tohold,
1508 1 << dn->dn_indblkshift, FTAG);
1509
1510 err = dmu_tx_check_ioerr(zio, dn, 1, i);
1511 @@ -477,7 +478,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
1512 * - 2 blocks for possibly split leaves,
1513 * - 2 grown ptrtbl blocks
1514 */
1515 - (void) refcount_add_many(&txh->txh_space_towrite,
1516 + (void) zfs_refcount_add_many(&txh->txh_space_towrite,
1517 MZAP_MAX_BLKSZ, FTAG);
1518
1519 if (dn == NULL)
1520 @@ -568,7 +569,8 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
1521 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
1522 DMU_NEW_OBJECT, THT_SPACE, space, 0);
1523 if (txh)
1524 - (void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
1525 + (void) zfs_refcount_add_many(&txh->txh_space_towrite, space,
1526 + FTAG);
1527 }
1528
1529 #ifdef ZFS_DEBUG
1530 @@ -919,8 +921,8 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
1531 (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
1532 mutex_exit(&dn->dn_mtx);
1533 }
1534 - towrite += refcount_count(&txh->txh_space_towrite);
1535 - tohold += refcount_count(&txh->txh_memory_tohold);
1536 + towrite += zfs_refcount_count(&txh->txh_space_towrite);
1537 + tohold += zfs_refcount_count(&txh->txh_memory_tohold);
1538 }
1539
1540 /* needed allocation: worst-case estimate of write space */
1541 @@ -962,7 +964,7 @@ dmu_tx_unassign(dmu_tx_t *tx)
1542 mutex_enter(&dn->dn_mtx);
1543 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1544
1545 - if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1546 + if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1547 dn->dn_assigned_txg = 0;
1548 cv_broadcast(&dn->dn_notxholds);
1549 }
1550 @@ -1100,10 +1102,10 @@ dmu_tx_destroy(dmu_tx_t *tx)
1551 dnode_t *dn = txh->txh_dnode;
1552
1553 list_remove(&tx->tx_holds, txh);
1554 - refcount_destroy_many(&txh->txh_space_towrite,
1555 - refcount_count(&txh->txh_space_towrite));
1556 - refcount_destroy_many(&txh->txh_memory_tohold,
1557 - refcount_count(&txh->txh_memory_tohold));
1558 + zfs_refcount_destroy_many(&txh->txh_space_towrite,
1559 + zfs_refcount_count(&txh->txh_space_towrite));
1560 + zfs_refcount_destroy_many(&txh->txh_memory_tohold,
1561 + zfs_refcount_count(&txh->txh_memory_tohold));
1562 kmem_free(txh, sizeof (dmu_tx_hold_t));
1563 if (dn != NULL)
1564 dnode_rele(dn, tx);
1565 @@ -1135,7 +1137,7 @@ dmu_tx_commit(dmu_tx_t *tx)
1566 mutex_enter(&dn->dn_mtx);
1567 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1568
1569 - if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1570 + if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1571 dn->dn_assigned_txg = 0;
1572 cv_broadcast(&dn->dn_notxholds);
1573 }
1574 @@ -1250,7 +1252,7 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1575 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1576 THT_SPILL, 0, 0);
1577 if (txh != NULL)
1578 - (void) refcount_add_many(&txh->txh_space_towrite,
1579 + (void) zfs_refcount_add_many(&txh->txh_space_towrite,
1580 SPA_OLD_MAXBLOCKSIZE, FTAG);
1581 }
1582
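
Note: the dmu_tx hunks show the second idiom these counters support. Rather than counting holds on an object, txh_space_towrite and txh_memory_tohold accumulate byte quantities under a single FTAG holder via zfs_refcount_add_many(), are read back with zfs_refcount_count(), and are torn down with zfs_refcount_destroy_many() passing the residual total. A condensed sketch of the write-estimate check above (example_charge_write is a hypothetical helper):

    static int
    example_charge_write(dmu_tx_hold_t *txh, uint64_t bytes)
    {
            /* Charge 'bytes' against this hold's write estimate. */
            (void) zfs_refcount_add_many(&txh->txh_space_towrite,
                bytes, FTAG);

            /* Reject transactions whose estimate grows absurdly large. */
            if (zfs_refcount_count(&txh->txh_space_towrite) >
                2 * DMU_MAX_ACCESS)
                    return (SET_ERROR(EFBIG));

            return (0);
    }
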
1583 diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
1584 index 77d38c36..989a8ec7 100644
1585 --- a/module/zfs/dnode.c
1586 +++ b/module/zfs/dnode.c
1587 @@ -124,8 +124,8 @@ dnode_cons(void *arg, void *unused, int kmflag)
1588 * Every dbuf has a reference, and dropping a tracked reference is
1589 * O(number of references), so don't track dn_holds.
1590 */
1591 - refcount_create_untracked(&dn->dn_holds);
1592 - refcount_create(&dn->dn_tx_holds);
1593 + zfs_refcount_create_untracked(&dn->dn_holds);
1594 + zfs_refcount_create(&dn->dn_tx_holds);
1595 list_link_init(&dn->dn_link);
1596
1597 bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
1598 @@ -180,8 +180,8 @@ dnode_dest(void *arg, void *unused)
1599 mutex_destroy(&dn->dn_mtx);
1600 mutex_destroy(&dn->dn_dbufs_mtx);
1601 cv_destroy(&dn->dn_notxholds);
1602 - refcount_destroy(&dn->dn_holds);
1603 - refcount_destroy(&dn->dn_tx_holds);
1604 + zfs_refcount_destroy(&dn->dn_holds);
1605 + zfs_refcount_destroy(&dn->dn_tx_holds);
1606 ASSERT(!list_link_active(&dn->dn_link));
1607
1608 for (i = 0; i < TXG_SIZE; i++) {
1609 @@ -377,7 +377,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
1610 void
1611 dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
1612 {
1613 - ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
1614 + ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
1615
1616 dnode_setdirty(dn, tx);
1617 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1618 @@ -394,7 +394,7 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
1619 void
1620 dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
1621 {
1622 - ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
1623 + ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
1624 dnode_setdirty(dn, tx);
1625 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1626 dn->dn_bonustype = newtype;
1627 @@ -405,7 +405,7 @@ dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
1628 void
1629 dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
1630 {
1631 - ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
1632 + ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
1633 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1634 dnode_setdirty(dn, tx);
1635 dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
1636 @@ -596,8 +596,8 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
1637 ASSERT0(dn->dn_allocated_txg);
1638 ASSERT0(dn->dn_assigned_txg);
1639 ASSERT0(dn->dn_dirty_txg);
1640 - ASSERT(refcount_is_zero(&dn->dn_tx_holds));
1641 - ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
1642 + ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
1643 + ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
1644 ASSERT(avl_is_empty(&dn->dn_dbufs));
1645
1646 for (i = 0; i < TXG_SIZE; i++) {
1647 @@ -786,8 +786,8 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
1648 ndn->dn_dirty_txg = odn->dn_dirty_txg;
1649 ndn->dn_dirtyctx = odn->dn_dirtyctx;
1650 ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
1651 - ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
1652 - refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
1653 + ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
1654 + zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
1655 ASSERT(avl_is_empty(&ndn->dn_dbufs));
1656 avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
1657 ndn->dn_dbufs_count = odn->dn_dbufs_count;
1658 @@ -975,7 +975,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
1659 * hold before the dbuf is removed, the hold is discounted, and the
1660 * removal is blocked until the move completes.
1661 */
1662 - refcount = refcount_count(&odn->dn_holds);
1663 + refcount = zfs_refcount_count(&odn->dn_holds);
1664 ASSERT(refcount >= 0);
1665 dbufs = odn->dn_dbufs_count;
1666
1667 @@ -1003,7 +1003,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
1668
1669 list_link_replace(&odn->dn_link, &ndn->dn_link);
1670 /* If the dnode was safe to move, the refcount cannot have changed. */
1671 - ASSERT(refcount == refcount_count(&ndn->dn_holds));
1672 + ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
1673 ASSERT(dbufs == ndn->dn_dbufs_count);
1674 zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
1675 mutex_exit(&os->os_lock);
1676 @@ -1152,7 +1152,7 @@ dnode_special_close(dnode_handle_t *dnh)
1677 * has a hold on this dnode while we are trying to evict this
1678 * dnode.
1679 */
1680 - while (refcount_count(&dn->dn_holds) > 0)
1681 + while (zfs_refcount_count(&dn->dn_holds) > 0)
1682 delay(1);
1683 ASSERT(dn->dn_dbuf == NULL ||
1684 dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
1685 @@ -1207,8 +1207,8 @@ dnode_buf_evict_async(void *dbu)
1686 * it wouldn't be eligible for eviction and this function
1687 * would not have been called.
1688 */
1689 - ASSERT(refcount_is_zero(&dn->dn_holds));
1690 - ASSERT(refcount_is_zero(&dn->dn_tx_holds));
1691 + ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
1692 + ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
1693
1694 dnode_destroy(dn); /* implicit zrl_remove() for first slot */
1695 zrl_destroy(&dnh->dnh_zrlock);
1696 @@ -1460,7 +1460,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
1697 }
1698
1699 mutex_enter(&dn->dn_mtx);
1700 - if (!refcount_is_zero(&dn->dn_holds)) {
1701 + if (!zfs_refcount_is_zero(&dn->dn_holds)) {
1702 DNODE_STAT_BUMP(dnode_hold_free_refcount);
1703 mutex_exit(&dn->dn_mtx);
1704 dnode_slots_rele(dnc, idx, slots);
1705 @@ -1520,7 +1520,7 @@ boolean_t
1706 dnode_add_ref(dnode_t *dn, void *tag)
1707 {
1708 mutex_enter(&dn->dn_mtx);
1709 - if (refcount_is_zero(&dn->dn_holds)) {
1710 + if (zfs_refcount_is_zero(&dn->dn_holds)) {
1711 mutex_exit(&dn->dn_mtx);
1712 return (FALSE);
1713 }
1714 @@ -1544,7 +1544,7 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag)
1715 dmu_buf_impl_t *db = dn->dn_dbuf;
1716 dnode_handle_t *dnh = dn->dn_handle;
1717
1718 - refs = refcount_remove(&dn->dn_holds, tag);
1719 + refs = zfs_refcount_remove(&dn->dn_holds, tag);
1720 mutex_exit(&dn->dn_mtx);
1721
1722 /*
1723 @@ -1608,7 +1608,7 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
1724 return;
1725 }
1726
1727 - ASSERT(!refcount_is_zero(&dn->dn_holds) ||
1728 + ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
1729 !avl_is_empty(&dn->dn_dbufs));
1730 ASSERT(dn->dn_datablksz != 0);
1731 ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
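
Note: the dnode_cons() hunk preserves an intentional asymmetry that its comment explains: dn_holds is created untracked because a tracked counter keeps a per-holder record, making each removal O(number of holds), which is too expensive for a count that grows with the number of dbufs; dn_tx_holds stays tracked. A hypothetical side-by-side of the two constructors (FTAG as the holder tag):

    static void
    example_tracking_modes(void)
    {
            zfs_refcount_t cheap, checked;

            zfs_refcount_create_untracked(&cheap);  /* bare counter */
            zfs_refcount_create_tracked(&checked);  /* per-holder records */

            (void) zfs_refcount_add(&cheap, FTAG);
            (void) zfs_refcount_add(&checked, FTAG);

            /* Tracked removal must present the tag used on add. */
            (void) zfs_refcount_remove(&cheap, FTAG);
            (void) zfs_refcount_remove(&checked, FTAG);

            zfs_refcount_destroy(&cheap);
            zfs_refcount_destroy(&checked);
    }
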
1732 diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
1733 index 8d65e385..2febb520 100644
1734 --- a/module/zfs/dnode_sync.c
1735 +++ b/module/zfs/dnode_sync.c
1736 @@ -422,7 +422,7 @@ dnode_evict_dbufs(dnode_t *dn)
1737
1738 mutex_enter(&db->db_mtx);
1739 if (db->db_state != DB_EVICTING &&
1740 - refcount_is_zero(&db->db_holds)) {
1741 + zfs_refcount_is_zero(&db->db_holds)) {
1742 db_marker->db_level = db->db_level;
1743 db_marker->db_blkid = db->db_blkid;
1744 db_marker->db_state = DB_SEARCH;
1745 @@ -451,7 +451,7 @@ dnode_evict_bonus(dnode_t *dn)
1746 {
1747 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1748 if (dn->dn_bonus != NULL) {
1749 - if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
1750 + if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
1751 mutex_enter(&dn->dn_bonus->db_mtx);
1752 dbuf_destroy(dn->dn_bonus);
1753 dn->dn_bonus = NULL;
1754 @@ -517,7 +517,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
1755 * zfs_obj_to_path() also depends on this being
1756 * commented out.
1757 *
1758 - * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
1759 + * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
1760 */
1761
1762 /* Undirty next bits */
1763 diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
1764 index b7562bcd..2e79c489 100644
1765 --- a/module/zfs/dsl_dataset.c
1766 +++ b/module/zfs/dsl_dataset.c
1767 @@ -287,7 +287,7 @@ dsl_dataset_evict_async(void *dbu)
1768 mutex_destroy(&ds->ds_lock);
1769 mutex_destroy(&ds->ds_opening_lock);
1770 mutex_destroy(&ds->ds_sendstream_lock);
1771 - refcount_destroy(&ds->ds_longholds);
1772 + zfs_refcount_destroy(&ds->ds_longholds);
1773 rrw_destroy(&ds->ds_bp_rwlock);
1774
1775 kmem_free(ds, sizeof (dsl_dataset_t));
1776 @@ -422,7 +422,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
1777 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
1778 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
1779 rrw_init(&ds->ds_bp_rwlock, B_FALSE);
1780 - refcount_create(&ds->ds_longholds);
1781 + zfs_refcount_create(&ds->ds_longholds);
1782
1783 bplist_create(&ds->ds_pending_deadlist);
1784 dsl_deadlist_open(&ds->ds_deadlist,
1785 @@ -458,7 +458,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
1786 mutex_destroy(&ds->ds_lock);
1787 mutex_destroy(&ds->ds_opening_lock);
1788 mutex_destroy(&ds->ds_sendstream_lock);
1789 - refcount_destroy(&ds->ds_longholds);
1790 + zfs_refcount_destroy(&ds->ds_longholds);
1791 bplist_destroy(&ds->ds_pending_deadlist);
1792 dsl_deadlist_close(&ds->ds_deadlist);
1793 kmem_free(ds, sizeof (dsl_dataset_t));
1794 @@ -520,7 +520,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
1795 mutex_destroy(&ds->ds_lock);
1796 mutex_destroy(&ds->ds_opening_lock);
1797 mutex_destroy(&ds->ds_sendstream_lock);
1798 - refcount_destroy(&ds->ds_longholds);
1799 + zfs_refcount_destroy(&ds->ds_longholds);
1800 kmem_free(ds, sizeof (dsl_dataset_t));
1801 if (err != 0) {
1802 dmu_buf_rele(dbuf, tag);
1803 @@ -651,14 +651,14 @@ dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
1804 void
1805 dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
1806 {
1807 - (void) refcount_remove(&ds->ds_longholds, tag);
1808 + (void) zfs_refcount_remove(&ds->ds_longholds, tag);
1809 }
1810
1811 /* Return B_TRUE if there are any long holds on this dataset. */
1812 boolean_t
1813 dsl_dataset_long_held(dsl_dataset_t *ds)
1814 {
1815 - return (!refcount_is_zero(&ds->ds_longholds));
1816 + return (!zfs_refcount_is_zero(&ds->ds_longholds));
1817 }
1818
1819 void
1820 diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c
1821 index d980f7d1..946eb1d3 100644
1822 --- a/module/zfs/dsl_destroy.c
1823 +++ b/module/zfs/dsl_destroy.c
1824 @@ -258,7 +258,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
1825 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1826 ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
1827 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1828 - ASSERT(refcount_is_zero(&ds->ds_longholds));
1829 + ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));
1830
1831 if (defer &&
1832 (ds->ds_userrefs > 0 ||
1833 @@ -619,7 +619,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
1834 if (ds->ds_is_snapshot)
1835 return (SET_ERROR(EINVAL));
1836
1837 - if (refcount_count(&ds->ds_longholds) != expected_holds)
1838 + if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
1839 return (SET_ERROR(EBUSY));
1840
1841 mos = ds->ds_dir->dd_pool->dp_meta_objset;
1842 @@ -647,7 +647,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
1843 dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
1844 ds->ds_prev->ds_userrefs == 0) {
1845 /* We need to remove the origin snapshot as well. */
1846 - if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
1847 + if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
1848 return (SET_ERROR(EBUSY));
1849 }
1850 return (0);
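
Note: the dsl_dataset.c and dsl_destroy.c hunks together show that dataset "long holds" are a thin wrapper over the same primitives: dsl_dataset_long_hold()/dsl_dataset_long_rele() add and remove a tag on ds_longholds, dsl_dataset_long_held() is a non-zero test, and the destroy checks above compare zfs_refcount_count() against the holds the caller expects. A sketch of the caller-side pairing (assuming the usual FTAG convention):

    dsl_dataset_long_hold(ds, FTAG);
    ASSERT(dsl_dataset_long_held(ds));
    /* ... the dataset cannot be destroyed while this hold exists ... */
    dsl_dataset_long_rele(ds, FTAG);
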
1851 diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
1852 index 40658d51..2a5581c3 100644
1853 --- a/module/zfs/metaslab.c
1854 +++ b/module/zfs/metaslab.c
1855 @@ -223,7 +223,7 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
1856 mc->mc_rotor = NULL;
1857 mc->mc_ops = ops;
1858 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
1859 - refcount_create_tracked(&mc->mc_alloc_slots);
1860 + zfs_refcount_create_tracked(&mc->mc_alloc_slots);
1861
1862 return (mc);
1863 }
1864 @@ -237,7 +237,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
1865 ASSERT(mc->mc_space == 0);
1866 ASSERT(mc->mc_dspace == 0);
1867
1868 - refcount_destroy(&mc->mc_alloc_slots);
1869 + zfs_refcount_destroy(&mc->mc_alloc_slots);
1870 mutex_destroy(&mc->mc_lock);
1871 kmem_free(mc, sizeof (metaslab_class_t));
1872 }
1873 @@ -585,7 +585,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
1874 mg->mg_activation_count = 0;
1875 mg->mg_initialized = B_FALSE;
1876 mg->mg_no_free_space = B_TRUE;
1877 - refcount_create_tracked(&mg->mg_alloc_queue_depth);
1878 + zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth);
1879
1880 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
1881 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
1882 @@ -608,7 +608,7 @@ metaslab_group_destroy(metaslab_group_t *mg)
1883 taskq_destroy(mg->mg_taskq);
1884 avl_destroy(&mg->mg_metaslab_tree);
1885 mutex_destroy(&mg->mg_lock);
1886 - refcount_destroy(&mg->mg_alloc_queue_depth);
1887 + zfs_refcount_destroy(&mg->mg_alloc_queue_depth);
1888 kmem_free(mg, sizeof (metaslab_group_t));
1889 }
1890
1891 @@ -907,7 +907,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1892 if (mg->mg_no_free_space)
1893 return (B_FALSE);
1894
1895 - qdepth = refcount_count(&mg->mg_alloc_queue_depth);
1896 + qdepth = zfs_refcount_count(&mg->mg_alloc_queue_depth);
1897
1898 /*
1899 * If this metaslab group is below its qmax or it's
1900 @@ -928,7 +928,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1901 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1902 qmax = mgp->mg_max_alloc_queue_depth;
1903
1904 - qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
1905 + qdepth = zfs_refcount_count(&mgp->mg_alloc_queue_depth);
1906
1907 /*
1908 * If there is another metaslab group that
1909 @@ -2679,7 +2679,7 @@ metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
1910 if (!mg->mg_class->mc_alloc_throttle_enabled)
1911 return;
1912
1913 - (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
1914 + (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth, tag);
1915 }
1916
1917 void
1918 @@ -2693,7 +2693,7 @@ metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
1919 for (d = 0; d < ndvas; d++) {
1920 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
1921 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
1922 - VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
1923 + VERIFY(zfs_refcount_not_held(&mg->mg_alloc_queue_depth, tag));
1924 }
1925 #endif
1926 }
1927 @@ -3348,7 +3348,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
1928 ASSERT(mc->mc_alloc_throttle_enabled);
1929 mutex_enter(&mc->mc_lock);
1930
1931 - reserved_slots = refcount_count(&mc->mc_alloc_slots);
1932 + reserved_slots = zfs_refcount_count(&mc->mc_alloc_slots);
1933 if (reserved_slots < mc->mc_alloc_max_slots)
1934 available_slots = mc->mc_alloc_max_slots - reserved_slots;
1935
1936 @@ -3360,7 +3360,8 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
1937 * them individually when an I/O completes.
1938 */
1939 for (d = 0; d < slots; d++) {
1940 - reserved_slots = zfs_refcount_add(&mc->mc_alloc_slots, zio);
1941 + reserved_slots = zfs_refcount_add(&mc->mc_alloc_slots,
1942 + zio);
1943 }
1944 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
1945 slot_reserved = B_TRUE;
1946 @@ -3378,7 +3379,7 @@ metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
1947 ASSERT(mc->mc_alloc_throttle_enabled);
1948 mutex_enter(&mc->mc_lock);
1949 for (d = 0; d < slots; d++) {
1950 - (void) refcount_remove(&mc->mc_alloc_slots, zio);
1951 + (void) zfs_refcount_remove(&mc->mc_alloc_slots, zio);
1952 }
1953 mutex_exit(&mc->mc_lock);
1954 }
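
Note: the metaslab throttle demonstrates that a tracked counter may hold the same tag more than once. metaslab_class_throttle_reserve() adds the zio once per reserved slot, so metaslab_class_throttle_unreserve() must remove it the same number of times, and zio_done() can later assert the zio no longer appears at all (see the zio.c hunk at the end of this patch). Condensed from the hunks above:

    /* Reserve: one reference per slot, all tagged with the zio. */
    for (d = 0; d < slots; d++)
            (void) zfs_refcount_add(&mc->mc_alloc_slots, zio);

    /* Unreserve: drop exactly as many references as were taken. */
    for (d = 0; d < slots; d++)
            (void) zfs_refcount_remove(&mc->mc_alloc_slots, zio);

    /* Debug builds can then verify the holder is fully gone. */
    VERIFY(zfs_refcount_not_held(&mc->mc_alloc_slots, zio));
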
1955 diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
1956 index 13f9bb6b..0a93aafb 100644
1957 --- a/module/zfs/refcount.c
1958 +++ b/module/zfs/refcount.c
1959 @@ -38,7 +38,7 @@ static kmem_cache_t *reference_cache;
1960 static kmem_cache_t *reference_history_cache;
1961
1962 void
1963 -refcount_init(void)
1964 +zfs_refcount_init(void)
1965 {
1966 reference_cache = kmem_cache_create("reference_cache",
1967 sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
1968 @@ -48,14 +48,14 @@ refcount_init(void)
1969 }
1970
1971 void
1972 -refcount_fini(void)
1973 +zfs_refcount_fini(void)
1974 {
1975 kmem_cache_destroy(reference_cache);
1976 kmem_cache_destroy(reference_history_cache);
1977 }
1978
1979 void
1980 -refcount_create(zfs_refcount_t *rc)
1981 +zfs_refcount_create(zfs_refcount_t *rc)
1982 {
1983 mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
1984 list_create(&rc->rc_list, sizeof (reference_t),
1985 @@ -68,21 +68,21 @@ refcount_create(zfs_refcount_t *rc)
1986 }
1987
1988 void
1989 -refcount_create_tracked(zfs_refcount_t *rc)
1990 +zfs_refcount_create_tracked(zfs_refcount_t *rc)
1991 {
1992 - refcount_create(rc);
1993 + zfs_refcount_create(rc);
1994 rc->rc_tracked = B_TRUE;
1995 }
1996
1997 void
1998 -refcount_create_untracked(zfs_refcount_t *rc)
1999 +zfs_refcount_create_untracked(zfs_refcount_t *rc)
2000 {
2001 - refcount_create(rc);
2002 + zfs_refcount_create(rc);
2003 rc->rc_tracked = B_FALSE;
2004 }
2005
2006 void
2007 -refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
2008 +zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
2009 {
2010 reference_t *ref;
2011
2012 @@ -103,25 +103,25 @@ refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
2013 }
2014
2015 void
2016 -refcount_destroy(zfs_refcount_t *rc)
2017 +zfs_refcount_destroy(zfs_refcount_t *rc)
2018 {
2019 - refcount_destroy_many(rc, 0);
2020 + zfs_refcount_destroy_many(rc, 0);
2021 }
2022
2023 int
2024 -refcount_is_zero(zfs_refcount_t *rc)
2025 +zfs_refcount_is_zero(zfs_refcount_t *rc)
2026 {
2027 return (rc->rc_count == 0);
2028 }
2029
2030 int64_t
2031 -refcount_count(zfs_refcount_t *rc)
2032 +zfs_refcount_count(zfs_refcount_t *rc)
2033 {
2034 return (rc->rc_count);
2035 }
2036
2037 int64_t
2038 -refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
2039 +zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
2040 {
2041 reference_t *ref = NULL;
2042 int64_t count;
2043 @@ -145,11 +145,11 @@ refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
2044 int64_t
2045 zfs_refcount_add(zfs_refcount_t *rc, void *holder)
2046 {
2047 - return (refcount_add_many(rc, 1, holder));
2048 + return (zfs_refcount_add_many(rc, 1, holder));
2049 }
2050
2051 int64_t
2052 -refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
2053 +zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
2054 {
2055 reference_t *ref;
2056 int64_t count;
2057 @@ -197,13 +197,13 @@ refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
2058 }
2059
2060 int64_t
2061 -refcount_remove(zfs_refcount_t *rc, void *holder)
2062 +zfs_refcount_remove(zfs_refcount_t *rc, void *holder)
2063 {
2064 - return (refcount_remove_many(rc, 1, holder));
2065 + return (zfs_refcount_remove_many(rc, 1, holder));
2066 }
2067
2068 void
2069 -refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
2070 +zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
2071 {
2072 int64_t count, removed_count;
2073 list_t list, removed;
2074 @@ -234,7 +234,7 @@ refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
2075 }
2076
2077 void
2078 -refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
2079 +zfs_refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
2080 void *new_holder)
2081 {
2082 reference_t *ref;
2083 @@ -264,7 +264,7 @@ refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
2084 * might be held.
2085 */
2086 boolean_t
2087 -refcount_held(zfs_refcount_t *rc, void *holder)
2088 +zfs_refcount_held(zfs_refcount_t *rc, void *holder)
2089 {
2090 reference_t *ref;
2091
2092 @@ -292,7 +292,7 @@ refcount_held(zfs_refcount_t *rc, void *holder)
2093 * since the reference might not be held.
2094 */
2095 boolean_t
2096 -refcount_not_held(zfs_refcount_t *rc, void *holder)
2097 +zfs_refcount_not_held(zfs_refcount_t *rc, void *holder)
2098 {
2099 reference_t *ref;
2100
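
Note: refcount.c shows what the rename does and does not change. zfs_refcount_is_zero() and zfs_refcount_count() remain unlocked reads of rc_count, so they are racy snapshots, and the comments above mark zfs_refcount_held()/zfs_refcount_not_held() as conservative answers ("might be held" / "might not be held"). Holder tags are pure bookkeeping, so an object that changes owners without changing its count moves the tag via zfs_refcount_transfer_ownership() rather than cycling remove/add. A hypothetical sketch (rc, old_owner, and new_owner are placeholder names):

    (void) zfs_refcount_add(&rc, old_owner);
    /* Hand the same reference to a new owner without a remove/add. */
    zfs_refcount_transfer_ownership(&rc, old_owner, new_owner);
    (void) zfs_refcount_remove(&rc, new_owner);
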
2101 diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
2102 index effff330..582b40a5 100644
2103 --- a/module/zfs/rrwlock.c
2104 +++ b/module/zfs/rrwlock.c
2105 @@ -85,7 +85,7 @@ rrn_find(rrwlock_t *rrl)
2106 {
2107 rrw_node_t *rn;
2108
2109 - if (refcount_count(&rrl->rr_linked_rcount) == 0)
2110 + if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
2111 return (NULL);
2112
2113 for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
2114 @@ -120,7 +120,7 @@ rrn_find_and_remove(rrwlock_t *rrl, void *tag)
2115 rrw_node_t *rn;
2116 rrw_node_t *prev = NULL;
2117
2118 - if (refcount_count(&rrl->rr_linked_rcount) == 0)
2119 + if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
2120 return (B_FALSE);
2121
2122 for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
2123 @@ -143,8 +143,8 @@ rrw_init(rrwlock_t *rrl, boolean_t track_all)
2124 mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
2125 cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
2126 rrl->rr_writer = NULL;
2127 - refcount_create(&rrl->rr_anon_rcount);
2128 - refcount_create(&rrl->rr_linked_rcount);
2129 + zfs_refcount_create(&rrl->rr_anon_rcount);
2130 + zfs_refcount_create(&rrl->rr_linked_rcount);
2131 rrl->rr_writer_wanted = B_FALSE;
2132 rrl->rr_track_all = track_all;
2133 }
2134 @@ -155,8 +155,8 @@ rrw_destroy(rrwlock_t *rrl)
2135 mutex_destroy(&rrl->rr_lock);
2136 cv_destroy(&rrl->rr_cv);
2137 ASSERT(rrl->rr_writer == NULL);
2138 - refcount_destroy(&rrl->rr_anon_rcount);
2139 - refcount_destroy(&rrl->rr_linked_rcount);
2140 + zfs_refcount_destroy(&rrl->rr_anon_rcount);
2141 + zfs_refcount_destroy(&rrl->rr_linked_rcount);
2142 }
2143
2144 static void
2145 @@ -173,10 +173,10 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
2146 DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
2147 #endif
2148 ASSERT(rrl->rr_writer != curthread);
2149 - ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
2150 + ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);
2151
2152 while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
2153 - refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
2154 + zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
2155 rrn_find(rrl) == NULL))
2156 cv_wait(&rrl->rr_cv, &rrl->rr_lock);
2157
2158 @@ -216,8 +216,8 @@ rrw_enter_write(rrwlock_t *rrl)
2159 mutex_enter(&rrl->rr_lock);
2160 ASSERT(rrl->rr_writer != curthread);
2161
2162 - while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
2163 - refcount_count(&rrl->rr_linked_rcount) > 0 ||
2164 + while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
2165 + zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
2166 rrl->rr_writer != NULL) {
2167 rrl->rr_writer_wanted = B_TRUE;
2168 cv_wait(&rrl->rr_cv, &rrl->rr_lock);
2169 @@ -250,24 +250,25 @@ rrw_exit(rrwlock_t *rrl, void *tag)
2170 }
2171 DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
2172 #endif
2173 - ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
2174 - !refcount_is_zero(&rrl->rr_linked_rcount) ||
2175 + ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
2176 + !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
2177 rrl->rr_writer != NULL);
2178
2179 if (rrl->rr_writer == NULL) {
2180 int64_t count;
2181 if (rrn_find_and_remove(rrl, tag)) {
2182 - count = refcount_remove(&rrl->rr_linked_rcount, tag);
2183 + count = zfs_refcount_remove(
2184 + &rrl->rr_linked_rcount, tag);
2185 } else {
2186 ASSERT(!rrl->rr_track_all);
2187 - count = refcount_remove(&rrl->rr_anon_rcount, tag);
2188 + count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
2189 }
2190 if (count == 0)
2191 cv_broadcast(&rrl->rr_cv);
2192 } else {
2193 ASSERT(rrl->rr_writer == curthread);
2194 - ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
2195 - refcount_is_zero(&rrl->rr_linked_rcount));
2196 + ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
2197 + zfs_refcount_is_zero(&rrl->rr_linked_rcount));
2198 rrl->rr_writer = NULL;
2199 cv_broadcast(&rrl->rr_cv);
2200 }
2201 @@ -288,7 +289,7 @@ rrw_held(rrwlock_t *rrl, krw_t rw)
2202 if (rw == RW_WRITER) {
2203 held = (rrl->rr_writer == curthread);
2204 } else {
2205 - held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
2206 + held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
2207 rrn_find(rrl) != NULL);
2208 }
2209 mutex_exit(&rrl->rr_lock);
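
Note: rrwlock keeps two reader counts, and rrw_exit() must drop the right one: readers whose hold was linked into thread-specific data decrement rr_linked_rcount, anonymous readers decrement rr_anon_rcount, and a removal that returns 0 wakes any waiting writer. The exit hunk above condenses to:

    if (rrn_find_and_remove(rrl, tag))
            count = zfs_refcount_remove(&rrl->rr_linked_rcount, tag);
    else
            count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);

    if (count == 0)
            cv_broadcast(&rrl->rr_cv);      /* a writer may proceed */
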
2210 diff --git a/module/zfs/sa.c b/module/zfs/sa.c
2211 index df4f6fd8..08f6165d 100644
2212 --- a/module/zfs/sa.c
2213 +++ b/module/zfs/sa.c
2214 @@ -1132,7 +1132,7 @@ sa_tear_down(objset_t *os)
2215 avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
2216 sa_idx_tab_t *tab;
2217 while ((tab = list_head(&layout->lot_idx_tab))) {
2218 - ASSERT(refcount_count(&tab->sa_refcount));
2219 + ASSERT(zfs_refcount_count(&tab->sa_refcount));
2220 sa_idx_tab_rele(os, tab);
2221 }
2222 }
2223 @@ -1317,13 +1317,13 @@ sa_idx_tab_rele(objset_t *os, void *arg)
2224 return;
2225
2226 mutex_enter(&sa->sa_lock);
2227 - if (refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
2228 + if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
2229 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
2230 if (idx_tab->sa_variable_lengths)
2231 kmem_free(idx_tab->sa_variable_lengths,
2232 sizeof (uint16_t) *
2233 idx_tab->sa_layout->lot_var_sizes);
2234 - refcount_destroy(&idx_tab->sa_refcount);
2235 + zfs_refcount_destroy(&idx_tab->sa_refcount);
2236 kmem_free(idx_tab->sa_idx_tab,
2237 sizeof (uint32_t) * sa->sa_num_attrs);
2238 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
2239 @@ -1560,7 +1560,7 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
2240 idx_tab->sa_idx_tab =
2241 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
2242 idx_tab->sa_layout = tb;
2243 - refcount_create(&idx_tab->sa_refcount);
2244 + zfs_refcount_create(&idx_tab->sa_refcount);
2245 if (tb->lot_var_sizes)
2246 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
2247 tb->lot_var_sizes, KM_SLEEP);
2248 diff --git a/module/zfs/spa.c b/module/zfs/spa.c
2249 index 02dda927..5002b3cb 100644
2250 --- a/module/zfs/spa.c
2251 +++ b/module/zfs/spa.c
2252 @@ -2302,7 +2302,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2253 * and are making their way through the eviction process.
2254 */
2255 spa_evicting_os_wait(spa);
2256 - spa->spa_minref = refcount_count(&spa->spa_refcount);
2257 + spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
2258 if (error) {
2259 if (error != EEXIST) {
2260 spa->spa_loaded_ts.tv_sec = 0;
2261 @@ -4260,7 +4260,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
2262 * and are making their way through the eviction process.
2263 */
2264 spa_evicting_os_wait(spa);
2265 - spa->spa_minref = refcount_count(&spa->spa_refcount);
2266 + spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
2267 spa->spa_load_state = SPA_LOAD_NONE;
2268
2269 mutex_exit(&spa_namespace_lock);
2270 @@ -6852,12 +6852,12 @@ spa_sync(spa_t *spa, uint64_t txg)
2271 * allocations look at mg_max_alloc_queue_depth, and async
2272 * allocations all happen from spa_sync().
2273 */
2274 - ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
2275 + ASSERT0(zfs_refcount_count(&mg->mg_alloc_queue_depth));
2276 mg->mg_max_alloc_queue_depth = max_queue_depth;
2277 queue_depth_total += mg->mg_max_alloc_queue_depth;
2278 }
2279 mc = spa_normal_class(spa);
2280 - ASSERT0(refcount_count(&mc->mc_alloc_slots));
2281 + ASSERT0(zfs_refcount_count(&mc->mc_alloc_slots));
2282 mc->mc_alloc_max_slots = queue_depth_total;
2283 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
2284
2285 diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
2286 index f6c9b40b..6514813e 100644
2287 --- a/module/zfs/spa_misc.c
2288 +++ b/module/zfs/spa_misc.c
2289 @@ -366,7 +366,7 @@ spa_config_lock_init(spa_t *spa)
2290 spa_config_lock_t *scl = &spa->spa_config_lock[i];
2291 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
2292 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
2293 - refcount_create_untracked(&scl->scl_count);
2294 + zfs_refcount_create_untracked(&scl->scl_count);
2295 scl->scl_writer = NULL;
2296 scl->scl_write_wanted = 0;
2297 }
2298 @@ -381,7 +381,7 @@ spa_config_lock_destroy(spa_t *spa)
2299 spa_config_lock_t *scl = &spa->spa_config_lock[i];
2300 mutex_destroy(&scl->scl_lock);
2301 cv_destroy(&scl->scl_cv);
2302 - refcount_destroy(&scl->scl_count);
2303 + zfs_refcount_destroy(&scl->scl_count);
2304 ASSERT(scl->scl_writer == NULL);
2305 ASSERT(scl->scl_write_wanted == 0);
2306 }
2307 @@ -406,7 +406,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
2308 }
2309 } else {
2310 ASSERT(scl->scl_writer != curthread);
2311 - if (!refcount_is_zero(&scl->scl_count)) {
2312 + if (!zfs_refcount_is_zero(&scl->scl_count)) {
2313 mutex_exit(&scl->scl_lock);
2314 spa_config_exit(spa, locks & ((1 << i) - 1),
2315 tag);
2316 @@ -441,7 +441,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
2317 }
2318 } else {
2319 ASSERT(scl->scl_writer != curthread);
2320 - while (!refcount_is_zero(&scl->scl_count)) {
2321 + while (!zfs_refcount_is_zero(&scl->scl_count)) {
2322 scl->scl_write_wanted++;
2323 cv_wait(&scl->scl_cv, &scl->scl_lock);
2324 scl->scl_write_wanted--;
2325 @@ -464,8 +464,8 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
2326 if (!(locks & (1 << i)))
2327 continue;
2328 mutex_enter(&scl->scl_lock);
2329 - ASSERT(!refcount_is_zero(&scl->scl_count));
2330 - if (refcount_remove(&scl->scl_count, tag) == 0) {
2331 + ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
2332 + if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
2333 ASSERT(scl->scl_writer == NULL ||
2334 scl->scl_writer == curthread);
2335 scl->scl_writer = NULL; /* OK in either case */
2336 @@ -484,7 +484,8 @@ spa_config_held(spa_t *spa, int locks, krw_t rw)
2337 spa_config_lock_t *scl = &spa->spa_config_lock[i];
2338 if (!(locks & (1 << i)))
2339 continue;
2340 - if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
2341 + if ((rw == RW_READER &&
2342 + !zfs_refcount_is_zero(&scl->scl_count)) ||
2343 (rw == RW_WRITER && scl->scl_writer == curthread))
2344 locks_held |= 1 << i;
2345 }
2346 @@ -602,7 +603,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
2347
2348 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
2349
2350 - refcount_create(&spa->spa_refcount);
2351 + zfs_refcount_create(&spa->spa_refcount);
2352 spa_config_lock_init(spa);
2353 spa_stats_init(spa);
2354
2355 @@ -680,7 +681,7 @@ spa_remove(spa_t *spa)
2356
2357 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2358 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
2359 - ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
2360 + ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
2361
2362 nvlist_free(spa->spa_config_splitting);
2363
2364 @@ -705,7 +706,7 @@ spa_remove(spa_t *spa)
2365 nvlist_free(spa->spa_feat_stats);
2366 spa_config_set(spa, NULL);
2367
2368 - refcount_destroy(&spa->spa_refcount);
2369 + zfs_refcount_destroy(&spa->spa_refcount);
2370
2371 spa_stats_destroy(spa);
2372 spa_config_lock_destroy(spa);
2373 @@ -766,7 +767,7 @@ spa_next(spa_t *prev)
2374 void
2375 spa_open_ref(spa_t *spa, void *tag)
2376 {
2377 - ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
2378 + ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
2379 MUTEX_HELD(&spa_namespace_lock));
2380 (void) zfs_refcount_add(&spa->spa_refcount, tag);
2381 }
2382 @@ -778,9 +779,9 @@ spa_open_ref(spa_t *spa, void *tag)
2383 void
2384 spa_close(spa_t *spa, void *tag)
2385 {
2386 - ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
2387 + ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
2388 MUTEX_HELD(&spa_namespace_lock));
2389 - (void) refcount_remove(&spa->spa_refcount, tag);
2390 + (void) zfs_refcount_remove(&spa->spa_refcount, tag);
2391 }
2392
2393 /*
2394 @@ -794,7 +795,7 @@ spa_close(spa_t *spa, void *tag)
2395 void
2396 spa_async_close(spa_t *spa, void *tag)
2397 {
2398 - (void) refcount_remove(&spa->spa_refcount, tag);
2399 + (void) zfs_refcount_remove(&spa->spa_refcount, tag);
2400 }
2401
2402 /*
2403 @@ -807,7 +808,7 @@ spa_refcount_zero(spa_t *spa)
2404 {
2405 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2406
2407 - return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
2408 + return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
2409 }
2410
2411 /*
2412 @@ -1878,7 +1879,7 @@ spa_init(int mode)
2413 #endif
2414
2415 fm_init();
2416 - refcount_init();
2417 + zfs_refcount_init();
2418 unique_init();
2419 range_tree_init();
2420 metaslab_alloc_trace_init();
2421 @@ -1914,7 +1915,7 @@ spa_fini(void)
2422 metaslab_alloc_trace_fini();
2423 range_tree_fini();
2424 unique_fini();
2425 - refcount_fini();
2426 + zfs_refcount_fini();
2427 fm_fini();
2428 qat_fini();
2429
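
Note: the spa hunks illustrate the baseline-count idiom: spa_load() and spa_create() record spa_minref (the references the pool takes on itself) once eviction settles, so the pool's count never reaches literal zero while the spa_t exists, and spa_refcount_zero() means "back at that baseline." A sketch of the open/close pairing (spa_refcount_zero() asserts that spa_namespace_lock is held):

    spa_open_ref(spa, FTAG);
    /* ... use the pool ... */
    spa_close(spa, FTAG);

    mutex_enter(&spa_namespace_lock);
    if (spa_refcount_zero(spa)) {
            /* Only the pool's own spa_minref references remain. */
    }
    mutex_exit(&spa_namespace_lock);
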
2430 diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c
2431 index de3c5a41..2964b65a 100644
2432 --- a/module/zfs/zfs_ctldir.c
2433 +++ b/module/zfs/zfs_ctldir.c
2434 @@ -144,7 +144,7 @@ zfsctl_snapshot_alloc(char *full_name, char *full_path, spa_t *spa,
2435 se->se_root_dentry = root_dentry;
2436 se->se_taskqid = TASKQID_INVALID;
2437
2438 - refcount_create(&se->se_refcount);
2439 + zfs_refcount_create(&se->se_refcount);
2440
2441 return (se);
2442 }
2443 @@ -156,7 +156,7 @@ zfsctl_snapshot_alloc(char *full_name, char *full_path, spa_t *spa,
2444 static void
2445 zfsctl_snapshot_free(zfs_snapentry_t *se)
2446 {
2447 - refcount_destroy(&se->se_refcount);
2448 + zfs_refcount_destroy(&se->se_refcount);
2449 strfree(se->se_name);
2450 strfree(se->se_path);
2451
2452 @@ -179,7 +179,7 @@ zfsctl_snapshot_hold(zfs_snapentry_t *se)
2453 static void
2454 zfsctl_snapshot_rele(zfs_snapentry_t *se)
2455 {
2456 - if (refcount_remove(&se->se_refcount, NULL) == 0)
2457 + if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
2458 zfsctl_snapshot_free(se);
2459 }
2460
2461 diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
2462 index 0ca10f82..7b893dc7 100644
2463 --- a/module/zfs/zfs_znode.c
2464 +++ b/module/zfs/zfs_znode.c
2465 @@ -149,7 +149,7 @@ zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
2466 znode_hold_t *zh = buf;
2467
2468 mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
2469 - refcount_create(&zh->zh_refcount);
2470 + zfs_refcount_create(&zh->zh_refcount);
2471 zh->zh_obj = ZFS_NO_OBJECT;
2472
2473 return (0);
2474 @@ -161,7 +161,7 @@ zfs_znode_hold_cache_destructor(void *buf, void *arg)
2475 znode_hold_t *zh = buf;
2476
2477 mutex_destroy(&zh->zh_lock);
2478 - refcount_destroy(&zh->zh_refcount);
2479 + zfs_refcount_destroy(&zh->zh_refcount);
2480 }
2481
2482 void
2483 @@ -279,7 +279,7 @@ zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
2484 kmem_cache_free(znode_hold_cache, zh_new);
2485
2486 ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
2487 - ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
2488 + ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
2489 mutex_enter(&zh->zh_lock);
2490
2491 return (zh);
2492 @@ -292,11 +292,11 @@ zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
2493 boolean_t remove = B_FALSE;
2494
2495 ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
2496 - ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
2497 + ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
2498 mutex_exit(&zh->zh_lock);
2499
2500 mutex_enter(&zfsvfs->z_hold_locks[i]);
2501 - if (refcount_remove(&zh->zh_refcount, NULL) == 0) {
2502 + if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
2503 avl_remove(&zfsvfs->z_hold_trees[i], zh);
2504 remove = B_TRUE;
2505 }
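
Note: several release paths in this patch (sa_idx_tab_rele(), zfsctl_snapshot_rele(), zfs_znode_hold_exit()) pass NULL as the holder tag. NULL is an ordinary tag value: if the counter is tracked, the NULL tag is recorded on add like any other, so anonymous holds must also be released anonymously. A sketch, assuming the matching hold path tags with NULL as these release sites imply:

    (void) zfs_refcount_add(&zh->zh_refcount, NULL);    /* anonymous hold */
    /* ... */
    if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
            /* Last hold: caller unlinks and frees the structure. */
    }
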
2506 diff --git a/module/zfs/zio.c b/module/zfs/zio.c
2507 index dd0dfcdb..3f8fca38 100644
2508 --- a/module/zfs/zio.c
2509 +++ b/module/zfs/zio.c
2510 @@ -2338,7 +2338,7 @@ zio_write_gang_block(zio_t *pio)
2511 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
2512
2513 flags |= METASLAB_ASYNC_ALLOC;
2514 - VERIFY(refcount_held(&mc->mc_alloc_slots, pio));
2515 + VERIFY(zfs_refcount_held(&mc->mc_alloc_slots, pio));
2516
2517 /*
2518 * The logical zio has already placed a reservation for
2519 @@ -3766,7 +3766,7 @@ zio_done(zio_t *zio)
2520 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2521 ASSERT(zio->io_bp != NULL);
2522 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio);
2523 - VERIFY(refcount_not_held(
2524 + VERIFY(zfs_refcount_not_held(
2525 &(spa_normal_class(zio->io_spa)->mc_alloc_slots), zio));
2526 }
2527