1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
22 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. | |
23 | * Copyright (c) 2012, 2017 by Delphix. All rights reserved. | |
24 | * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. |
25 | */ | |
26 | ||
27 | #include <sys/zfs_context.h> | |
28 | #include <sys/dbuf.h> | |
29 | #include <sys/dnode.h> | |
30 | #include <sys/dmu.h> | |
31 | #include <sys/dmu_impl.h> | |
32 | #include <sys/dmu_tx.h> | |
33 | #include <sys/dmu_objset.h> | |
34 | #include <sys/dsl_dir.h> | |
35 | #include <sys/dsl_dataset.h> | |
36 | #include <sys/spa.h> | |
37 | #include <sys/zio.h> | |
38 | #include <sys/dmu_zfetch.h> | |
39 | #include <sys/range_tree.h> | |
40 | #include <sys/trace_dnode.h> | |
41 | ||
42 | dnode_stats_t dnode_stats = { |
43 | { "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 }, | |
44 | { "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 }, | |
45 | { "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 }, | |
46 | { "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 }, | |
47 | { "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 }, | |
48 | { "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 }, | |
49 | { "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 }, | |
50 | { "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 }, | |
51 | { "dnode_hold_free_hits", KSTAT_DATA_UINT64 }, | |
52 | { "dnode_hold_free_misses", KSTAT_DATA_UINT64 }, | |
53 | { "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 }, | |
54 | { "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 }, | |
55 | { "dnode_hold_free_overflow", KSTAT_DATA_UINT64 }, | |
56 | { "dnode_hold_free_refcount", KSTAT_DATA_UINT64 }, | |
57 | { "dnode_hold_free_txg", KSTAT_DATA_UINT64 }, | |
58 | { "dnode_allocate", KSTAT_DATA_UINT64 }, | |
59 | { "dnode_reallocate", KSTAT_DATA_UINT64 }, | |
60 | { "dnode_buf_evict", KSTAT_DATA_UINT64 }, | |
61 | { "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 }, | |
62 | { "dnode_alloc_race", KSTAT_DATA_UINT64 }, | |
63 | { "dnode_alloc_next_block", KSTAT_DATA_UINT64 }, | |
64 | { "dnode_move_invalid", KSTAT_DATA_UINT64 }, | |
65 | { "dnode_move_recheck1", KSTAT_DATA_UINT64 }, | |
66 | { "dnode_move_recheck2", KSTAT_DATA_UINT64 }, | |
67 | { "dnode_move_special", KSTAT_DATA_UINT64 }, | |
68 | { "dnode_move_handle", KSTAT_DATA_UINT64 }, | |
69 | { "dnode_move_rwlock", KSTAT_DATA_UINT64 }, | |
70 | { "dnode_move_active", KSTAT_DATA_UINT64 }, | |
71 | }; | |
72 | ||
73 | static kstat_t *dnode_ksp; | |
74 | static kmem_cache_t *dnode_cache; | |
75 | |
76 | ASSERTV(static dnode_phys_t dnode_phys_zero); | |
77 | ||
78 | int zfs_default_bs = SPA_MINBLOCKSHIFT; | |
79 | int zfs_default_ibs = DN_MAX_INDBLKSHIFT; | |
80 | ||
81 | #ifdef _KERNEL | |
82 | static kmem_cbrc_t dnode_move(void *, void *, size_t, void *); | |
83 | #endif /* _KERNEL */ | |
84 | ||
85 | static int | |
86 | dbuf_compare(const void *x1, const void *x2) | |
87 | { | |
88 | const dmu_buf_impl_t *d1 = x1; | |
89 | const dmu_buf_impl_t *d2 = x2; | |
90 | ||
91 | int cmp = AVL_CMP(d1->db_level, d2->db_level); |
92 | if (likely(cmp)) | |
93 | return (cmp); | |
94 | ||
95 | cmp = AVL_CMP(d1->db_blkid, d2->db_blkid); |
96 | if (likely(cmp)) | |
97 | return (cmp); | |
98 | |
99 | if (d1->db_state == DB_SEARCH) { | |
100 | ASSERT3S(d2->db_state, !=, DB_SEARCH); | |
101 | return (-1); | |
102 | } else if (d2->db_state == DB_SEARCH) { | |
103 | ASSERT3S(d1->db_state, !=, DB_SEARCH); | |
104 | return (1); | |
105 | } | |
106 | ||
107 | return (AVL_PCMP(d1, d2)); | |
108 | } |
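/*
 * Illustrative note (not in the original source): because dbuf_compare()
 * orders dbufs by (db_level, db_blkid) and treats DB_SEARCH entries as
 * sentinels, a ranged lookup in dn_dbufs can be done with a stack key.
 * A minimal sketch of the assumed caller pattern (cf. dbuf_free_range()):
 *
 *	dmu_buf_impl_t db_search;
 *	avl_index_t where;
 *
 *	db_search.db_level = 0;
 *	db_search.db_blkid = start_blkid;
 *	db_search.db_state = DB_SEARCH;
 *	(void) avl_find(&dn->dn_dbufs, &db_search, &where);
 *	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
 */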
109 | ||
110 | /* ARGSUSED */ | |
111 | static int | |
112 | dnode_cons(void *arg, void *unused, int kmflag) | |
113 | { | |
114 | dnode_t *dn = arg; | |
115 | int i; | |
116 | ||
117 | rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL); | |
118 | mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL); |
119 | mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
120 | cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL); | |
121 | ||
122 | /* | |
123 | * Every dbuf has a reference, and dropping a tracked reference is | |
124 | * O(number of references), so don't track dn_holds. | |
125 | */ | |
126 | refcount_create_untracked(&dn->dn_holds); | |
127 | refcount_create(&dn->dn_tx_holds); | |
128 | list_link_init(&dn->dn_link); | |
129 | ||
130 | bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr)); | |
131 | bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels)); | |
132 | bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift)); | |
133 | bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype)); | |
134 | bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk)); | |
135 | bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen)); | |
136 | bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz)); | |
137 | ||
138 | for (i = 0; i < TXG_SIZE; i++) { | |
139 | list_link_init(&dn->dn_dirty_link[i]); | |
140 | dn->dn_free_ranges[i] = NULL; | |
141 | list_create(&dn->dn_dirty_records[i], | |
142 | sizeof (dbuf_dirty_record_t), | |
143 | offsetof(dbuf_dirty_record_t, dr_dirty_node)); | |
144 | } | |
145 | ||
146 | dn->dn_allocated_txg = 0; | |
147 | dn->dn_free_txg = 0; | |
148 | dn->dn_assigned_txg = 0; | |
149 | dn->dn_dirtyctx = 0; | |
150 | dn->dn_dirtyctx_firstset = NULL; | |
151 | dn->dn_bonus = NULL; | |
152 | dn->dn_have_spill = B_FALSE; | |
153 | dn->dn_zio = NULL; | |
154 | dn->dn_oldused = 0; | |
155 | dn->dn_oldflags = 0; | |
156 | dn->dn_olduid = 0; | |
157 | dn->dn_oldgid = 0; | |
158 | dn->dn_newuid = 0; | |
159 | dn->dn_newgid = 0; | |
160 | dn->dn_id_flags = 0; | |
161 | ||
162 | dn->dn_dbufs_count = 0; | |
163 | avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t), |
164 | offsetof(dmu_buf_impl_t, db_link)); | |
165 | ||
166 | dn->dn_moved = 0; | |
167 | return (0); | |
168 | } | |
169 | ||
170 | /* ARGSUSED */ | |
171 | static void | |
172 | dnode_dest(void *arg, void *unused) | |
173 | { | |
174 | int i; | |
175 | dnode_t *dn = arg; | |
176 | ||
177 | rw_destroy(&dn->dn_struct_rwlock); | |
178 | mutex_destroy(&dn->dn_mtx); | |
179 | mutex_destroy(&dn->dn_dbufs_mtx); | |
180 | cv_destroy(&dn->dn_notxholds); | |
181 | refcount_destroy(&dn->dn_holds); | |
182 | refcount_destroy(&dn->dn_tx_holds); | |
183 | ASSERT(!list_link_active(&dn->dn_link)); | |
184 | ||
185 | for (i = 0; i < TXG_SIZE; i++) { | |
186 | ASSERT(!list_link_active(&dn->dn_dirty_link[i])); | |
187 | ASSERT3P(dn->dn_free_ranges[i], ==, NULL); | |
188 | list_destroy(&dn->dn_dirty_records[i]); | |
189 | ASSERT0(dn->dn_next_nblkptr[i]); | |
190 | ASSERT0(dn->dn_next_nlevels[i]); | |
191 | ASSERT0(dn->dn_next_indblkshift[i]); | |
192 | ASSERT0(dn->dn_next_bonustype[i]); | |
193 | ASSERT0(dn->dn_rm_spillblk[i]); | |
194 | ASSERT0(dn->dn_next_bonuslen[i]); | |
195 | ASSERT0(dn->dn_next_blksz[i]); | |
196 | } | |
197 | ||
198 | ASSERT0(dn->dn_allocated_txg); | |
199 | ASSERT0(dn->dn_free_txg); | |
200 | ASSERT0(dn->dn_assigned_txg); | |
201 | ASSERT0(dn->dn_dirtyctx); | |
202 | ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL); | |
203 | ASSERT3P(dn->dn_bonus, ==, NULL); | |
204 | ASSERT(!dn->dn_have_spill); | |
205 | ASSERT3P(dn->dn_zio, ==, NULL); | |
206 | ASSERT0(dn->dn_oldused); | |
207 | ASSERT0(dn->dn_oldflags); | |
208 | ASSERT0(dn->dn_olduid); | |
209 | ASSERT0(dn->dn_oldgid); | |
210 | ASSERT0(dn->dn_newuid); | |
211 | ASSERT0(dn->dn_newgid); | |
212 | ASSERT0(dn->dn_id_flags); | |
213 | ||
214 | ASSERT0(dn->dn_dbufs_count); | |
215 | avl_destroy(&dn->dn_dbufs); |
216 | } | |
217 | ||
218 | void | |
219 | dnode_init(void) | |
220 | { | |
221 | ASSERT(dnode_cache == NULL); | |
222 | dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t), | |
223 | 0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0); | |
224 | kmem_cache_set_move(dnode_cache, dnode_move); | |
225 | |
226 | dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc", | |
227 | KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t), | |
228 | KSTAT_FLAG_VIRTUAL); | |
229 | if (dnode_ksp != NULL) { | |
230 | dnode_ksp->ks_data = &dnode_stats; | |
231 | kstat_install(dnode_ksp); | |
232 | } | |
233 | } |
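/*
 * Illustrative note (not in the original source): on Linux the kstat
 * registered above is typically exposed as a virtual file, presumably
 * /proc/spl/kstat/zfs/dnodestats, and the dnode_stats entries are bumped
 * through the DNODE_STAT_BUMP()/DNODE_STAT_INCR() macros used throughout
 * this file, e.g.:
 *
 *	DNODE_STAT_BUMP(dnode_hold_alloc_hits);
 *
 * Reading that file is an assumed way to observe hold/move behavior while
 * debugging.
 */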
234 | ||
235 | void | |
236 | dnode_fini(void) | |
237 | { | |
238 | if (dnode_ksp != NULL) { |
239 | kstat_delete(dnode_ksp); | |
240 | dnode_ksp = NULL; | |
241 | } | |
242 | ||
243 | kmem_cache_destroy(dnode_cache); |
244 | dnode_cache = NULL; | |
245 | } | |
246 | ||
247 | ||
248 | #ifdef ZFS_DEBUG | |
249 | void | |
250 | dnode_verify(dnode_t *dn) | |
251 | { | |
252 | int drop_struct_lock = FALSE; | |
253 | ||
254 | ASSERT(dn->dn_phys); | |
255 | ASSERT(dn->dn_objset); | |
256 | ASSERT(dn->dn_handle->dnh_dnode == dn); | |
257 | ||
258 | ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type)); | |
259 | ||
260 | if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY)) | |
261 | return; | |
262 | ||
263 | if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { | |
264 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
265 | drop_struct_lock = TRUE; | |
266 | } | |
267 | if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) { | |
268 | int i; | |
269 | int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); | |
270 | ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT); |
271 | if (dn->dn_datablkshift) { | |
272 | ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT); | |
273 | ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT); | |
274 | ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz); | |
275 | } | |
276 | ASSERT3U(dn->dn_nlevels, <=, 30); | |
277 | ASSERT(DMU_OT_IS_VALID(dn->dn_type)); | |
278 | ASSERT3U(dn->dn_nblkptr, >=, 1); | |
279 | ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR); | |
280 | ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen); | |
281 | ASSERT3U(dn->dn_datablksz, ==, |
282 | dn->dn_datablkszsec << SPA_MINBLOCKSHIFT); | |
283 | ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0); | |
284 | ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) + | |
285 | dn->dn_bonuslen, <=, max_bonuslen); | |
286 | for (i = 0; i < TXG_SIZE; i++) { |
287 | ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels); | |
288 | } | |
289 | } | |
290 | if (dn->dn_phys->dn_type != DMU_OT_NONE) | |
291 | ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels); | |
292 | ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL); | |
293 | if (dn->dn_dbuf != NULL) { | |
294 | ASSERT3P(dn->dn_phys, ==, | |
295 | (dnode_phys_t *)dn->dn_dbuf->db.db_data + | |
296 | (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT))); | |
297 | } | |
298 | if (drop_struct_lock) | |
299 | rw_exit(&dn->dn_struct_rwlock); | |
300 | } | |
301 | #endif | |
302 | ||
303 | void | |
304 | dnode_byteswap(dnode_phys_t *dnp) | |
305 | { | |
306 | uint64_t *buf64 = (void*)&dnp->dn_blkptr; | |
307 | int i; | |
308 | ||
309 | if (dnp->dn_type == DMU_OT_NONE) { | |
310 | bzero(dnp, sizeof (dnode_phys_t)); | |
311 | return; | |
312 | } | |
313 | ||
314 | dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec); | |
315 | dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen); | |
316 | dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots); | |
317 | dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid); |
318 | dnp->dn_used = BSWAP_64(dnp->dn_used); | |
319 | ||
320 | /* | |
321 | * dn_nblkptr is only one byte, so it's OK to read it in either | |
322 | * byte order. We can't read dn_bonuslen. | |
323 | */ | |
324 | ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT); | |
325 | ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR); | |
326 | for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++) | |
327 | buf64[i] = BSWAP_64(buf64[i]); | |
328 | ||
329 | /* | |
330 | * OK to check dn_bonuslen for zero, because it won't matter if | |
331 | * we have the wrong byte order. This is necessary because the | |
332 | * dnode dnode is smaller than a regular dnode. | |
333 | */ | |
334 | if (dnp->dn_bonuslen != 0) { | |
335 | /* | |
336 | * Note that the bonus length calculated here may be | |
337 | * longer than the actual bonus buffer. This is because | |
338 | * we always put the bonus buffer after the last block | |
339 | * pointer (instead of packing it against the end of the | |
340 | * dnode buffer). | |
341 | */ | |
342 | int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t); | |
343 | int slots = dnp->dn_extra_slots + 1; |
344 | size_t len = DN_SLOTS_TO_BONUSLEN(slots) - off; | |
345 | dmu_object_byteswap_t byteswap; |
346 | ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype)); | |
347 | byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype); | |
348 | dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len); | |
349 | } | |
350 | ||
351 | /* Swap SPILL block if we have one */ | |
352 | if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) | |
353 | byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t)); | |
354 | } |
355 | ||
356 | void | |
357 | dnode_buf_byteswap(void *vbuf, size_t size) | |
358 | { | |
359 | int i = 0; | |
360 | |
361 | ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT)); | |
362 | ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0); | |
363 | ||
364 | while (i < size) { |
365 | dnode_phys_t *dnp = (void *)(((char *)vbuf) + i); | |
366 | dnode_byteswap(dnp); | |
367 | ||
368 | i += DNODE_MIN_SIZE; | |
369 | if (dnp->dn_type != DMU_OT_NONE) | |
370 | i += dnp->dn_extra_slots * DNODE_MIN_SIZE; | |
371 | } |
372 | } | |
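/*
 * Illustrative example (not in the original source): assuming the minimum
 * dnode size of 512 bytes, a 16K metadnode block holds 32 slots. The loop
 * above advances one slot at a time, and an allocated large dnode
 * additionally skips its dn_extra_slots; e.g. a 1K dnode (dn_extra_slots
 * == 1) consumes two slots, so the next dnode_byteswap() call happens
 * 1024 bytes further on rather than 512.
 */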
373 | ||
374 | void | |
375 | dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx) | |
376 | { | |
377 | ASSERT3U(refcount_count(&dn->dn_holds), >=, 1); | |
378 | ||
379 | dnode_setdirty(dn, tx); | |
380 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
381 | ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - | |
382 | (dn->dn_nblkptr-1) * sizeof (blkptr_t)); |
383 | dn->dn_bonuslen = newsize; | |
384 | if (newsize == 0) | |
385 | dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN; | |
386 | else | |
387 | dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen; | |
388 | rw_exit(&dn->dn_struct_rwlock); | |
389 | } | |
390 | ||
391 | void | |
392 | dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx) | |
393 | { | |
394 | ASSERT3U(refcount_count(&dn->dn_holds), >=, 1); | |
395 | dnode_setdirty(dn, tx); | |
396 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
397 | dn->dn_bonustype = newtype; | |
398 | dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype; | |
399 | rw_exit(&dn->dn_struct_rwlock); | |
400 | } | |
401 | ||
402 | void | |
403 | dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx) | |
404 | { | |
405 | ASSERT3U(refcount_count(&dn->dn_holds), >=, 1); | |
406 | ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
407 | dnode_setdirty(dn, tx); | |
408 | dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK; | |
409 | dn->dn_have_spill = B_FALSE; | |
410 | } | |
411 | ||
412 | static void | |
413 | dnode_setdblksz(dnode_t *dn, int size) | |
414 | { | |
415 | ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE)); | |
416 | ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); | |
417 | ASSERT3U(size, >=, SPA_MINBLOCKSIZE); | |
418 | ASSERT3U(size >> SPA_MINBLOCKSHIFT, <, | |
419 | 1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8)); | |
420 | dn->dn_datablksz = size; | |
421 | dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT; | |
422 | dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0; | |
423 | } | |
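/*
 * Worked example (not in the original source): for size == 131072 (128K),
 * dn_datablkszsec becomes 131072 >> 9 == 256 and, since the size is a
 * power of two, dn_datablkshift becomes highbit64(131071) == 17. For a
 * non-power-of-two size the shift is left at 0 and callers must use
 * dn_datablksz directly.
 */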
424 | ||
425 | static dnode_t * | |
426 | dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db, | |
427 | uint64_t object, dnode_handle_t *dnh) | |
428 | { | |
429 | dnode_t *dn; | |
430 | ||
431 | dn = kmem_cache_alloc(dnode_cache, KM_SLEEP); | |
432 | ASSERT(!POINTER_IS_VALID(dn->dn_objset)); | |
433 | dn->dn_moved = 0; | |
434 | ||
435 | /* | |
436 | * Defer setting dn_objset until the dnode is ready to be a candidate | |
437 | * for the dnode_move() callback. | |
438 | */ | |
439 | dn->dn_object = object; | |
440 | dn->dn_dbuf = db; | |
441 | dn->dn_handle = dnh; | |
442 | dn->dn_phys = dnp; | |
443 | ||
444 | if (dnp->dn_datablkszsec) { | |
445 | dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); | |
446 | } else { | |
447 | dn->dn_datablksz = 0; | |
448 | dn->dn_datablkszsec = 0; | |
449 | dn->dn_datablkshift = 0; | |
450 | } | |
451 | dn->dn_indblkshift = dnp->dn_indblkshift; | |
452 | dn->dn_nlevels = dnp->dn_nlevels; | |
453 | dn->dn_type = dnp->dn_type; | |
454 | dn->dn_nblkptr = dnp->dn_nblkptr; | |
455 | dn->dn_checksum = dnp->dn_checksum; | |
456 | dn->dn_compress = dnp->dn_compress; | |
457 | dn->dn_bonustype = dnp->dn_bonustype; | |
458 | dn->dn_bonuslen = dnp->dn_bonuslen; | |
459 | dn->dn_num_slots = dnp->dn_extra_slots + 1; | |
460 | dn->dn_maxblkid = dnp->dn_maxblkid; |
461 | dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0); | |
462 | dn->dn_id_flags = 0; | |
463 | ||
464 | dmu_zfetch_init(&dn->dn_zfetch, dn); | |
465 | ||
466 | ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type)); | |
467 | ASSERT(zrl_is_locked(&dnh->dnh_zrlock)); |
468 | ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode)); | |
469 | |
470 | mutex_enter(&os->os_lock); | |
471 | |
472 | /* | |
473 | * Exclude special dnodes from os_dnodes so an empty os_dnodes | |
474 | * signifies that the special dnodes have no references from | |
475 | * their children (the entries in os_dnodes). This allows | |
476 | * dnode_destroy() to easily determine if the last child has | |
477 | * been removed and then complete eviction of the objset. | |
478 | */ | |
479 | if (!DMU_OBJECT_IS_SPECIAL(object)) | |
480 | list_insert_head(&os->os_dnodes, dn); | |
481 | membar_producer(); | |
482 | ||
483 | /* | |
484 | * Everything else must be valid before assigning dn_objset | |
485 | * makes the dnode eligible for dnode_move(). | |
486 | */ | |
487 | dn->dn_objset = os; | |
488 | ||
489 | dnh->dnh_dnode = dn; | |
490 | mutex_exit(&os->os_lock); | |
491 | ||
492 | arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE); |
493 | ||
494 | return (dn); |
495 | } | |
496 | ||
497 | /* | |
498 | * Caller must be holding the dnode handle, which is released upon return. | |
499 | */ | |
500 | static void | |
501 | dnode_destroy(dnode_t *dn) | |
502 | { | |
503 | objset_t *os = dn->dn_objset; | |
504 | boolean_t complete_os_eviction = B_FALSE; | |
505 | ||
506 | ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0); | |
507 | ||
508 | mutex_enter(&os->os_lock); | |
509 | POINTER_INVALIDATE(&dn->dn_objset); | |
510 | if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) { | |
511 | list_remove(&os->os_dnodes, dn); | |
512 | complete_os_eviction = | |
513 | list_is_empty(&os->os_dnodes) && | |
514 | list_link_active(&os->os_evicting_node); | |
515 | } | |
516 | mutex_exit(&os->os_lock); | |
517 | ||
518 | /* the dnode can no longer move, so we can release the handle */ | |
519 | zrl_remove(&dn->dn_handle->dnh_zrlock); | |
520 | ||
521 | dn->dn_allocated_txg = 0; | |
522 | dn->dn_free_txg = 0; | |
523 | dn->dn_assigned_txg = 0; | |
524 | ||
525 | dn->dn_dirtyctx = 0; | |
526 | if (dn->dn_dirtyctx_firstset != NULL) { | |
527 | kmem_free(dn->dn_dirtyctx_firstset, 1); | |
528 | dn->dn_dirtyctx_firstset = NULL; | |
529 | } | |
530 | if (dn->dn_bonus != NULL) { | |
531 | mutex_enter(&dn->dn_bonus->db_mtx); | |
532 | dbuf_destroy(dn->dn_bonus); | |
533 | dn->dn_bonus = NULL; |
534 | } | |
535 | dn->dn_zio = NULL; | |
536 | ||
537 | dn->dn_have_spill = B_FALSE; | |
538 | dn->dn_oldused = 0; | |
539 | dn->dn_oldflags = 0; | |
540 | dn->dn_olduid = 0; | |
541 | dn->dn_oldgid = 0; | |
542 | dn->dn_newuid = 0; | |
543 | dn->dn_newgid = 0; | |
544 | dn->dn_id_flags = 0; | |
545 | ||
546 | dmu_zfetch_fini(&dn->dn_zfetch); | |
547 | kmem_cache_free(dnode_cache, dn); | |
548 | arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE); | |
549 | |
550 | if (complete_os_eviction) | |
551 | dmu_objset_evict_done(os); | |
552 | } | |
553 | ||
554 | void | |
555 | dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs, | |
556 | dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx) | |
557 | { |
558 | int i; | |
559 | ||
560 | ASSERT3U(dn_slots, >, 0); |
561 | ASSERT3U(dn_slots << DNODE_SHIFT, <=, | |
562 | spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))); | |
563 | ASSERT3U(blocksize, <=, |
564 | spa_maxblocksize(dmu_objset_spa(dn->dn_objset))); | |
565 | if (blocksize == 0) | |
566 | blocksize = 1 << zfs_default_bs; | |
567 | else | |
568 | blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE); | |
569 | ||
570 | if (ibs == 0) | |
571 | ibs = zfs_default_ibs; | |
572 | ||
573 | ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT); | |
574 | ||
575 | dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n", |
576 | dn->dn_objset, dn->dn_object, tx->tx_txg, blocksize, ibs, dn_slots); | |
577 | DNODE_STAT_BUMP(dnode_allocate); | |
578 | |
579 | ASSERT(dn->dn_type == DMU_OT_NONE); | |
580 | ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0); | |
581 | ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE); | |
582 | ASSERT(ot != DMU_OT_NONE); | |
583 | ASSERT(DMU_OT_IS_VALID(ot)); | |
584 | ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) || | |
585 | (bonustype == DMU_OT_SA && bonuslen == 0) || | |
586 | (bonustype != DMU_OT_NONE && bonuslen != 0)); | |
587 | ASSERT(DMU_OT_IS_VALID(bonustype)); | |
588 | ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots)); | |
589 | ASSERT(dn->dn_type == DMU_OT_NONE); |
590 | ASSERT0(dn->dn_maxblkid); | |
591 | ASSERT0(dn->dn_allocated_txg); | |
592 | ASSERT0(dn->dn_assigned_txg); | |
593 | ASSERT(refcount_is_zero(&dn->dn_tx_holds)); | |
594 | ASSERT3U(refcount_count(&dn->dn_holds), <=, 1); | |
595 | ASSERT(avl_is_empty(&dn->dn_dbufs)); | |
596 | ||
597 | for (i = 0; i < TXG_SIZE; i++) { | |
598 | ASSERT0(dn->dn_next_nblkptr[i]); | |
599 | ASSERT0(dn->dn_next_nlevels[i]); | |
600 | ASSERT0(dn->dn_next_indblkshift[i]); | |
601 | ASSERT0(dn->dn_next_bonuslen[i]); | |
602 | ASSERT0(dn->dn_next_bonustype[i]); | |
603 | ASSERT0(dn->dn_rm_spillblk[i]); | |
604 | ASSERT0(dn->dn_next_blksz[i]); | |
605 | ASSERT(!list_link_active(&dn->dn_dirty_link[i])); | |
606 | ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL); | |
607 | ASSERT3P(dn->dn_free_ranges[i], ==, NULL); | |
608 | } | |
609 | ||
610 | dn->dn_type = ot; | |
611 | dnode_setdblksz(dn, blocksize); | |
612 | dn->dn_indblkshift = ibs; | |
613 | dn->dn_nlevels = 1; | |
614 | dn->dn_num_slots = dn_slots; | |
615 | if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */ |
616 | dn->dn_nblkptr = 1; | |
617 | else { |
618 | dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR, | |
619 | 1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >> | |
620 | SPA_BLKPTRSHIFT)); | |
621 | } | |
622 | ||
623 | dn->dn_bonustype = bonustype; |
624 | dn->dn_bonuslen = bonuslen; | |
625 | dn->dn_checksum = ZIO_CHECKSUM_INHERIT; | |
626 | dn->dn_compress = ZIO_COMPRESS_INHERIT; | |
627 | dn->dn_dirtyctx = 0; | |
628 | ||
629 | dn->dn_free_txg = 0; | |
630 | if (dn->dn_dirtyctx_firstset) { | |
631 | kmem_free(dn->dn_dirtyctx_firstset, 1); | |
632 | dn->dn_dirtyctx_firstset = NULL; | |
633 | } | |
634 | ||
635 | dn->dn_allocated_txg = tx->tx_txg; | |
636 | dn->dn_id_flags = 0; | |
637 | ||
638 | dnode_setdirty(dn, tx); | |
639 | dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs; | |
640 | dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen; | |
641 | dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype; | |
642 | dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz; | |
643 | } | |
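/*
 * Worked example (not in the original source), assuming the usual 64-byte
 * dnode core and 128-byte block pointers: for a single-slot (512-byte)
 * dnode, DN_SLOTS_TO_BONUSLEN(1) is 512 - 64 - 128 = 320. With bonuslen
 * == 0 the computation above yields dn_nblkptr = MIN(3, 1 + (320 >> 7))
 * == 3, while a full 320-byte bonus buffer leaves dn_nblkptr == 1, and
 * bonustype == DMU_OT_SA always forces a single block pointer to maximize
 * SA bonus space.
 */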
644 | ||
645 | void | |
646 | dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, | |
647 | dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx) | |
648 | { |
649 | int nblkptr; | |
650 | ||
651 | ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE); | |
652 | ASSERT3U(blocksize, <=, | |
653 | spa_maxblocksize(dmu_objset_spa(dn->dn_objset))); | |
654 | ASSERT0(blocksize % SPA_MINBLOCKSIZE); | |
655 | ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx)); | |
656 | ASSERT(tx->tx_txg != 0); | |
657 | ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) || | |
658 | (bonustype != DMU_OT_NONE && bonuslen != 0) || | |
659 | (bonustype == DMU_OT_SA && bonuslen == 0)); | |
660 | ASSERT(DMU_OT_IS_VALID(bonustype)); | |
661 | ASSERT3U(bonuslen, <=, |
662 | DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)))); | |
663 | ||
664 | dn_slots = dn_slots > 0 ? dn_slots : DNODE_MIN_SLOTS; | |
665 | DNODE_STAT_BUMP(dnode_reallocate); | |
666 | |
667 | /* clean up any unreferenced dbufs */ | |
668 | dnode_evict_dbufs(dn); | |
669 | ||
670 | dn->dn_id_flags = 0; | |
671 | ||
672 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
673 | dnode_setdirty(dn, tx); | |
674 | if (dn->dn_datablksz != blocksize) { | |
675 | /* change blocksize */ | |
676 | ASSERT(dn->dn_maxblkid == 0 && | |
677 | (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) || | |
678 | dnode_block_freed(dn, 0))); | |
679 | dnode_setdblksz(dn, blocksize); | |
680 | dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize; | |
681 | } | |
682 | if (dn->dn_bonuslen != bonuslen) | |
683 | dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen; | |
684 | ||
685 | if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */ | |
686 | nblkptr = 1; | |
687 | else | |
688 | nblkptr = MIN(DN_MAX_NBLKPTR, |
689 | 1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >> | |
690 | SPA_BLKPTRSHIFT)); | |
691 | if (dn->dn_bonustype != bonustype) |
692 | dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype; | |
693 | if (dn->dn_nblkptr != nblkptr) | |
694 | dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr; | |
695 | if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { | |
696 | dbuf_rm_spill(dn, tx); | |
697 | dnode_rm_spill(dn, tx); | |
698 | } | |
699 | rw_exit(&dn->dn_struct_rwlock); | |
700 | ||
701 | /* change type */ | |
702 | dn->dn_type = ot; | |
703 | ||
704 | /* change bonus size and type */ | |
705 | mutex_enter(&dn->dn_mtx); | |
706 | dn->dn_bonustype = bonustype; | |
707 | dn->dn_bonuslen = bonuslen; | |
708 | dn->dn_num_slots = dn_slots; | |
709 | dn->dn_nblkptr = nblkptr; |
710 | dn->dn_checksum = ZIO_CHECKSUM_INHERIT; | |
711 | dn->dn_compress = ZIO_COMPRESS_INHERIT; | |
712 | ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR); | |
713 | ||
714 | /* fix up the bonus db_size */ | |
715 | if (dn->dn_bonus) { | |
716 | dn->dn_bonus->db.db_size = | |
717 | DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - |
718 | (dn->dn_nblkptr-1) * sizeof (blkptr_t); | |
719 | ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size); |
720 | } | |
721 | ||
722 | dn->dn_allocated_txg = tx->tx_txg; | |
723 | mutex_exit(&dn->dn_mtx); | |
724 | } | |
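/*
 * Worked example (not in the original source), again assuming a 64-byte
 * core and 128-byte block pointers: after reallocating a single-slot
 * dnode with bonustype == DMU_OT_SA the bonus dbuf is resized to
 * DN_SLOTS_TO_BONUSLEN(1) - 0 * sizeof (blkptr_t) = 320 bytes, whereas
 * with dn_nblkptr == 3 it shrinks to 320 - 2 * 128 = 64 bytes.
 */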
725 | ||
726 | #ifdef _KERNEL | |
727 | static void |
728 | dnode_move_impl(dnode_t *odn, dnode_t *ndn) | |
729 | { | |
730 | int i; | |
731 | ||
732 | ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock)); | |
733 | ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx)); | |
734 | ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx)); | |
735 | ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock)); | |
736 | ||
737 | /* Copy fields. */ | |
738 | ndn->dn_objset = odn->dn_objset; | |
739 | ndn->dn_object = odn->dn_object; | |
740 | ndn->dn_dbuf = odn->dn_dbuf; | |
741 | ndn->dn_handle = odn->dn_handle; | |
742 | ndn->dn_phys = odn->dn_phys; | |
743 | ndn->dn_type = odn->dn_type; | |
744 | ndn->dn_bonuslen = odn->dn_bonuslen; | |
745 | ndn->dn_bonustype = odn->dn_bonustype; | |
746 | ndn->dn_nblkptr = odn->dn_nblkptr; | |
747 | ndn->dn_checksum = odn->dn_checksum; | |
748 | ndn->dn_compress = odn->dn_compress; | |
749 | ndn->dn_nlevels = odn->dn_nlevels; | |
750 | ndn->dn_indblkshift = odn->dn_indblkshift; | |
751 | ndn->dn_datablkshift = odn->dn_datablkshift; | |
752 | ndn->dn_datablkszsec = odn->dn_datablkszsec; | |
753 | ndn->dn_datablksz = odn->dn_datablksz; | |
754 | ndn->dn_maxblkid = odn->dn_maxblkid; | |
755 | ndn->dn_num_slots = odn->dn_num_slots; | |
756 | bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0], |
757 | sizeof (odn->dn_next_nblkptr)); | |
758 | bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0], | |
759 | sizeof (odn->dn_next_nlevels)); | |
760 | bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0], | |
761 | sizeof (odn->dn_next_indblkshift)); | |
762 | bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0], | |
763 | sizeof (odn->dn_next_bonustype)); | |
764 | bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0], | |
765 | sizeof (odn->dn_rm_spillblk)); | |
766 | bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0], | |
767 | sizeof (odn->dn_next_bonuslen)); | |
768 | bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0], | |
769 | sizeof (odn->dn_next_blksz)); | |
770 | for (i = 0; i < TXG_SIZE; i++) { | |
771 | list_move_tail(&ndn->dn_dirty_records[i], | |
772 | &odn->dn_dirty_records[i]); | |
773 | } | |
774 | bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0], | |
775 | sizeof (odn->dn_free_ranges)); | |
776 | ndn->dn_allocated_txg = odn->dn_allocated_txg; | |
777 | ndn->dn_free_txg = odn->dn_free_txg; | |
778 | ndn->dn_assigned_txg = odn->dn_assigned_txg; | |
779 | ndn->dn_dirtyctx = odn->dn_dirtyctx; | |
780 | ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset; | |
781 | ASSERT(refcount_count(&odn->dn_tx_holds) == 0); | |
782 | refcount_transfer(&ndn->dn_holds, &odn->dn_holds); | |
783 | ASSERT(avl_is_empty(&ndn->dn_dbufs)); | |
784 | avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs); | |
785 | ndn->dn_dbufs_count = odn->dn_dbufs_count; | |
786 | ndn->dn_bonus = odn->dn_bonus; |
787 | ndn->dn_have_spill = odn->dn_have_spill; | |
788 | ndn->dn_zio = odn->dn_zio; | |
789 | ndn->dn_oldused = odn->dn_oldused; | |
790 | ndn->dn_oldflags = odn->dn_oldflags; | |
791 | ndn->dn_olduid = odn->dn_olduid; | |
792 | ndn->dn_oldgid = odn->dn_oldgid; | |
793 | ndn->dn_newuid = odn->dn_newuid; | |
794 | ndn->dn_newgid = odn->dn_newgid; | |
795 | ndn->dn_id_flags = odn->dn_id_flags; | |
796 | dmu_zfetch_init(&ndn->dn_zfetch, NULL); | |
797 | list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream); | |
798 | ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode; | |
799 | |
800 | /* | |
801 | * Update back pointers. Updating the handle fixes the back pointer of | |
802 | * every descendant dbuf as well as the bonus dbuf. | |
803 | */ | |
804 | ASSERT(ndn->dn_handle->dnh_dnode == odn); | |
805 | ndn->dn_handle->dnh_dnode = ndn; | |
806 | if (ndn->dn_zfetch.zf_dnode == odn) { | |
807 | ndn->dn_zfetch.zf_dnode = ndn; | |
808 | } | |
809 | ||
810 | /* | |
811 | * Invalidate the original dnode by clearing all of its back pointers. | |
812 | */ | |
813 | odn->dn_dbuf = NULL; | |
814 | odn->dn_handle = NULL; | |
815 | avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t), | |
816 | offsetof(dmu_buf_impl_t, db_link)); | |
817 | odn->dn_dbufs_count = 0; | |
818 | odn->dn_bonus = NULL; |
819 | odn->dn_zfetch.zf_dnode = NULL; | |
820 | ||
821 | /* | |
822 | * Set the low bit of the objset pointer to ensure that dnode_move() | |
823 | * recognizes the dnode as invalid in any subsequent callback. | |
824 | */ | |
825 | POINTER_INVALIDATE(&odn->dn_objset); | |
826 | ||
827 | /* | |
828 | * Satisfy the destructor. | |
829 | */ | |
830 | for (i = 0; i < TXG_SIZE; i++) { | |
831 | list_create(&odn->dn_dirty_records[i], | |
832 | sizeof (dbuf_dirty_record_t), | |
833 | offsetof(dbuf_dirty_record_t, dr_dirty_node)); | |
834 | odn->dn_free_ranges[i] = NULL; | |
835 | odn->dn_next_nlevels[i] = 0; | |
836 | odn->dn_next_indblkshift[i] = 0; | |
837 | odn->dn_next_bonustype[i] = 0; | |
838 | odn->dn_rm_spillblk[i] = 0; | |
839 | odn->dn_next_bonuslen[i] = 0; | |
840 | odn->dn_next_blksz[i] = 0; | |
841 | } | |
842 | odn->dn_allocated_txg = 0; | |
843 | odn->dn_free_txg = 0; | |
844 | odn->dn_assigned_txg = 0; | |
845 | odn->dn_dirtyctx = 0; | |
846 | odn->dn_dirtyctx_firstset = NULL; | |
847 | odn->dn_have_spill = B_FALSE; | |
848 | odn->dn_zio = NULL; | |
849 | odn->dn_oldused = 0; | |
850 | odn->dn_oldflags = 0; | |
851 | odn->dn_olduid = 0; | |
852 | odn->dn_oldgid = 0; | |
853 | odn->dn_newuid = 0; | |
854 | odn->dn_newgid = 0; | |
855 | odn->dn_id_flags = 0; | |
856 | ||
857 | /* | |
858 | * Mark the dnode. | |
859 | */ | |
860 | ndn->dn_moved = 1; | |
861 | odn->dn_moved = (uint8_t)-1; | |
862 | } | |
863 | ||
864 | /*ARGSUSED*/ | |
865 | static kmem_cbrc_t | |
866 | dnode_move(void *buf, void *newbuf, size_t size, void *arg) | |
867 | { | |
868 | dnode_t *odn = buf, *ndn = newbuf; | |
869 | objset_t *os; | |
870 | int64_t refcount; | |
871 | uint32_t dbufs; | |
872 | ||
873 | /* | |
874 | * The dnode is on the objset's list of known dnodes if the objset | |
875 | * pointer is valid. We set the low bit of the objset pointer when | |
876 | * freeing the dnode to invalidate it, and the memory patterns written | |
877 | * by kmem (baddcafe and deadbeef) set at least one of the two low bits. | |
878 | * A newly created dnode sets the objset pointer last of all to indicate | |
879 | * that the dnode is known and in a valid state to be moved by this | |
880 | * function. | |
881 | */ | |
882 | os = odn->dn_objset; | |
883 | if (!POINTER_IS_VALID(os)) { | |
884 | DNODE_STAT_BUMP(dnode_move_invalid); | |
885 | return (KMEM_CBRC_DONT_KNOW); |
886 | } | |
887 | ||
888 | /* | |
889 | * Ensure that the objset does not go away during the move. | |
890 | */ | |
891 | rw_enter(&os_lock, RW_WRITER); | |
892 | if (os != odn->dn_objset) { | |
893 | rw_exit(&os_lock); | |
894 | DNODE_STAT_BUMP(dnode_move_recheck1); | |
895 | return (KMEM_CBRC_DONT_KNOW); |
896 | } | |
897 | ||
898 | /* | |
899 | * If the dnode is still valid, then so is the objset. We know that no | |
900 | * valid objset can be freed while we hold os_lock, so we can safely | |
901 | * ensure that the objset remains in use. | |
902 | */ | |
903 | mutex_enter(&os->os_lock); | |
904 | ||
905 | /* | |
906 | * Recheck the objset pointer in case the dnode was removed just before | |
907 | * acquiring the lock. | |
908 | */ | |
909 | if (os != odn->dn_objset) { | |
910 | mutex_exit(&os->os_lock); | |
911 | rw_exit(&os_lock); | |
912 | DNODE_STAT_BUMP(dnode_move_recheck2); | |
913 | return (KMEM_CBRC_DONT_KNOW); |
914 | } | |
915 | ||
916 | /* | |
917 | * At this point we know that as long as we hold os->os_lock, the dnode | |
918 | * cannot be freed and fields within the dnode can be safely accessed. | |
919 | * The objset listing this dnode cannot go away as long as this dnode is | |
920 | * on its list. | |
921 | */ | |
922 | rw_exit(&os_lock); | |
923 | if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) { | |
924 | mutex_exit(&os->os_lock); | |
925 | DNODE_STAT_BUMP(dnode_move_special); | |
926 | return (KMEM_CBRC_NO); |
927 | } | |
928 | ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */ | |
929 | ||
930 | /* | |
931 | * Lock the dnode handle to prevent the dnode from obtaining any new | |
932 | * holds. This also prevents the descendant dbufs and the bonus dbuf | |
933 | * from accessing the dnode, so that we can discount their holds. The | |
934 | * handle is safe to access because we know that while the dnode cannot | |
935 | * go away, neither can its handle. Once we hold dnh_zrlock, we can | |
936 | * safely move any dnode referenced only by dbufs. | |
937 | */ | |
938 | if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) { | |
939 | mutex_exit(&os->os_lock); | |
940 | DNODE_STAT_BUMP(dnode_move_handle); | |
941 | return (KMEM_CBRC_LATER); |
942 | } | |
943 | ||
944 | /* | |
945 | * Ensure a consistent view of the dnode's holds and the dnode's dbufs. | |
946 | * We need to guarantee that there is a hold for every dbuf in order to | |
947 | * determine whether the dnode is actively referenced. Falsely matching | |
948 | * a dbuf to an active hold would lead to an unsafe move. It's possible | |
949 | * that a thread already having an active dnode hold is about to add a | |
950 | * dbuf, and we can't compare hold and dbuf counts while the add is in | |
951 | * progress. | |
952 | */ | |
953 | if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) { | |
954 | zrl_exit(&odn->dn_handle->dnh_zrlock); | |
955 | mutex_exit(&os->os_lock); | |
956 | DNODE_STAT_BUMP(dnode_move_rwlock); | |
957 | return (KMEM_CBRC_LATER); |
958 | } | |
959 | ||
960 | /* | |
961 | * A dbuf may be removed (evicted) without an active dnode hold. In that | |
962 | * case, the dbuf count is decremented under the handle lock before the | |
963 | * dbuf's hold is released. This order ensures that if we count the hold | |
964 | * after the dbuf is removed but before its hold is released, we will | |
965 | * treat the unmatched hold as active and exit safely. If we count the | |
966 | * hold before the dbuf is removed, the hold is discounted, and the | |
967 | * removal is blocked until the move completes. | |
968 | */ | |
969 | refcount = refcount_count(&odn->dn_holds); | |
970 | ASSERT(refcount >= 0); | |
971 | dbufs = odn->dn_dbufs_count; | |
972 | ||
973 | /* We can't have more dbufs than dnode holds. */ | |
974 | ASSERT3U(dbufs, <=, refcount); | |
975 | DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount, | |
976 | uint32_t, dbufs); | |
977 | ||
978 | if (refcount > dbufs) { | |
979 | rw_exit(&odn->dn_struct_rwlock); | |
980 | zrl_exit(&odn->dn_handle->dnh_zrlock); | |
981 | mutex_exit(&os->os_lock); | |
982 | DNODE_STAT_BUMP(dnode_move_active); | |
983 | return (KMEM_CBRC_LATER); |
984 | } | |
985 | ||
986 | rw_exit(&odn->dn_struct_rwlock); | |
987 | ||
988 | /* | |
989 | * At this point we know that anyone with a hold on the dnode is not | |
990 | * actively referencing it. The dnode is known and in a valid state to | |
991 | * move. We're holding the locks needed to execute the critical section. | |
992 | */ | |
993 | dnode_move_impl(odn, ndn); | |
994 | ||
995 | list_link_replace(&odn->dn_link, &ndn->dn_link); | |
996 | /* If the dnode was safe to move, the refcount cannot have changed. */ | |
997 | ASSERT(refcount == refcount_count(&ndn->dn_holds)); | |
998 | ASSERT(dbufs == ndn->dn_dbufs_count); | |
999 | zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */ | |
1000 | mutex_exit(&os->os_lock); | |
1001 | ||
1002 | return (KMEM_CBRC_YES); | |
1003 | } | |
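/*
 * Summary (not in the original source) of the kmem move-callback protocol
 * as used above: dnode_move() answers KMEM_CBRC_DONT_KNOW when the objset
 * pointer is invalid or changes under it, KMEM_CBRC_NO for special
 * (handle-embedded) dnodes that can never move, KMEM_CBRC_LATER when a
 * lock cannot be taken without blocking or when active holds exceed the
 * dbuf count, and KMEM_CBRC_YES only after dnode_move_impl() has copied
 * the dnode and invalidated the old buffer.
 */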
1004 | #endif /* _KERNEL */ | |
1005 | ||
1006 | static void |
1007 | dnode_slots_hold(dnode_children_t *children, int idx, int slots) | |
1008 | { | |
1009 | ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
1010 | ||
1011 | for (int i = idx; i < idx + slots; i++) { | |
1012 | dnode_handle_t *dnh = &children->dnc_children[i]; | |
1013 | zrl_add(&dnh->dnh_zrlock); | |
1014 | } | |
1015 | } | |
1016 | ||
1017 | static void | |
1018 | dnode_slots_rele(dnode_children_t *children, int idx, int slots) | |
1019 | { | |
1020 | ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
1021 | ||
1022 | for (int i = idx; i < idx + slots; i++) { | |
1023 | dnode_handle_t *dnh = &children->dnc_children[i]; | |
1024 | ||
1025 | if (zrl_is_locked(&dnh->dnh_zrlock)) | |
1026 | zrl_exit(&dnh->dnh_zrlock); | |
1027 | else | |
1028 | zrl_remove(&dnh->dnh_zrlock); | |
1029 | } | |
1030 | } | |
1031 | ||
1032 | static int | |
1033 | dnode_slots_tryenter(dnode_children_t *children, int idx, int slots) | |
1034 | { | |
1035 | ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
1036 | ||
1037 | for (int i = idx; i < idx + slots; i++) { | |
1038 | dnode_handle_t *dnh = &children->dnc_children[i]; | |
1039 | ||
1040 | if (!zrl_tryenter(&dnh->dnh_zrlock)) { | |
1041 | for (int j = idx; j < i; j++) { | |
1042 | dnh = &children->dnc_children[j]; | |
1043 | zrl_exit(&dnh->dnh_zrlock); | |
1044 | } | |
1045 | ||
1046 | return (0); | |
1047 | } | |
1048 | } | |
1049 | ||
1050 | return (1); | |
1051 | } | |
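/*
 * Illustrative sketch (not in the original source) of how dnode_hold_impl()
 * below combines these helpers; the read-side hold is dropped before the
 * exclusive tryenter, so the checks must be repeated once the locks are
 * taken:
 *
 *	dnode_slots_hold(dnc, idx, slots);
 *	... cheap checks under the shared hold ...
 *	dnode_slots_rele(dnc, idx, slots);
 *	if (!dnode_slots_tryenter(dnc, idx, slots))
 *		continue;	(lost the race, retry the loop)
 *	... re-check, then dnode_create() or take the existing dnode ...
 */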
1052 | ||
1053 | static void | |
1054 | dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr) | |
1055 | { | |
1056 | ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
1057 | ||
1058 | for (int i = idx; i < idx + slots; i++) { | |
1059 | dnode_handle_t *dnh = &children->dnc_children[i]; | |
1060 | dnh->dnh_dnode = ptr; | |
1061 | } | |
1062 | } | |
1063 | ||
1064 | static boolean_t | |
1065 | dnode_check_slots(dnode_children_t *children, int idx, int slots, void *ptr) | |
1066 | { | |
1067 | ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
1068 | ||
1069 | for (int i = idx; i < idx + slots; i++) { | |
1070 | dnode_handle_t *dnh = &children->dnc_children[i]; | |
1071 | if (dnh->dnh_dnode != ptr) | |
1072 | return (B_FALSE); | |
1073 | } | |
1074 | ||
1075 | return (B_TRUE); | |
1076 | } | |
1077 | ||
1078 | void |
1079 | dnode_special_close(dnode_handle_t *dnh) | |
1080 | { | |
1081 | dnode_t *dn = dnh->dnh_dnode; | |
1082 | ||
1083 | /* | |
1084 | * Wait for final references to the dnode to clear. This can | |
1085 | * only happen if the arc is asynchronously evicting state that | |
1086 | * has a hold on this dnode while we are trying to evict this |
1087 | * dnode. | |
1088 | */ | |
1089 | while (refcount_count(&dn->dn_holds) > 0) | |
1090 | delay(1); | |
1091 | ASSERT(dn->dn_dbuf == NULL || | |
1092 | dmu_buf_get_user(&dn->dn_dbuf->db) == NULL); | |
1093 | zrl_add(&dnh->dnh_zrlock); | |
1094 | dnode_destroy(dn); /* implicit zrl_remove() */ | |
1095 | zrl_destroy(&dnh->dnh_zrlock); | |
1096 | dnh->dnh_dnode = NULL; | |
1097 | } | |
1098 | ||
1099 | void | |
1100 | dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object, | |
1101 | dnode_handle_t *dnh) | |
1102 | { | |
1103 | dnode_t *dn; | |
1104 | ||
1105 | zrl_init(&dnh->dnh_zrlock); | |
1106 | zrl_tryenter(&dnh->dnh_zrlock); |
1107 | ||
1108 | dn = dnode_create(os, dnp, NULL, object, dnh); | |
1109 | DNODE_VERIFY(dn); | |
1110 | |
1111 | zrl_exit(&dnh->dnh_zrlock); | |
1112 | } |
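/*
 * Illustrative note (not in the original source): the special dnodes (the
 * meta dnode and the user/group used dnodes) are presumably opened this
 * way from the objset setup path, with the handle embedded in the objset
 * rather than in a metadnode dbuf, e.g. (hypothetical call site):
 *
 *	dnode_special_open(os, &os->os_phys->os_meta_dnode,
 *	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
 */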
1113 | ||
1114 | static void | |
1115 | dnode_buf_evict_async(void *dbu) | |
1116 | { | |
1117 | dnode_children_t *dnc = dbu; | |
1118 | ||
1119 | DNODE_STAT_BUMP(dnode_buf_evict); |
1120 | ||
1121 | for (int i = 0; i < dnc->dnc_count; i++) { | |
1122 | dnode_handle_t *dnh = &dnc->dnc_children[i]; | |
1123 | dnode_t *dn; |
1124 | ||
1125 | /* | |
1126 | * The dnode handle lock guards against the dnode moving to | |
1127 | * another valid address, so there is no need here to guard | |
1128 | * against changes to or from NULL. | |
1129 | */ | |
1130 | if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) { | |
1131 | zrl_destroy(&dnh->dnh_zrlock); | |
1132 | dnh->dnh_dnode = DN_SLOT_UNINIT; | |
1133 | continue; |
1134 | } | |
1135 | ||
1136 | zrl_add(&dnh->dnh_zrlock); | |
1137 | dn = dnh->dnh_dnode; | |
1138 | /* | |
1139 | * If there are holds on this dnode, then there should | |
1140 | * be holds on the dnode's containing dbuf as well; thus | |
1141 | * it wouldn't be eligible for eviction and this function | |
1142 | * would not have been called. | |
1143 | */ | |
1144 | ASSERT(refcount_is_zero(&dn->dn_holds)); | |
1145 | ASSERT(refcount_is_zero(&dn->dn_tx_holds)); | |
1146 | ||
1147 | dnode_destroy(dn); /* implicit zrl_remove() for first slot */ | |
1148 | zrl_destroy(&dnh->dnh_zrlock); | |
1149 | dnh->dnh_dnode = DN_SLOT_UNINIT; | |
1150 | } | |
1151 | kmem_free(dnc, sizeof (dnode_children_t) + |
1152 | dnc->dnc_count * sizeof (dnode_handle_t)); | |
1153 | } |
1154 | ||
1155 | /* | |
1156 | * errors: | |
1157 | * EINVAL - Invalid object number or flags. |
1158 | * ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE) | |
1159 | * EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE) | |
1160 | * - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED) | |
1161 | * ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED) | |
1162 | * EIO - I/O error when reading the meta dnode dbuf. | |
1163 | * | |
1164 | * succeeds even for free dnodes. |
1165 | */ | |
1166 | int | |
1167 | dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, | |
1168 | void *tag, dnode_t **dnp) |
1169 | { | |
1170 | int epb, idx, err; | |
1171 | int drop_struct_lock = FALSE; | |
1172 | int type; | |
1173 | uint64_t blk; | |
1174 | dnode_t *mdn, *dn; | |
1175 | dmu_buf_impl_t *db; | |
1176 | dnode_children_t *dnc; |
1177 | dnode_phys_t *dn_block; | |
1178 | dnode_handle_t *dnh; |
1179 | ||
1180 | ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0)); |
1181 | ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0)); | |
1182 | ||
1183 | /* |
1184 | * If you are holding the spa config lock as writer, you shouldn't | |
1185 | * be asking the DMU to do *anything* unless it's the root pool | |
1186 | * which may require us to read from the root filesystem while | |
1187 | * holding some (not all) of the locks as writer. | |
1188 | */ | |
1189 | ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 || | |
1190 | (spa_is_root(os->os_spa) && | |
1191 | spa_config_held(os->os_spa, SCL_STATE, RW_WRITER))); | |
1192 | ||
1193 | if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) { | |
1194 | dn = (object == DMU_USERUSED_OBJECT) ? | |
1195 | DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os); | |
1196 | if (dn == NULL) | |
1197 | return (SET_ERROR(ENOENT)); | |
1198 | type = dn->dn_type; | |
1199 | if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) | |
1200 | return (SET_ERROR(ENOENT)); | |
1201 | if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE) | |
1202 | return (SET_ERROR(EEXIST)); | |
1203 | DNODE_VERIFY(dn); | |
1204 | (void) refcount_add(&dn->dn_holds, tag); | |
1205 | *dnp = dn; | |
1206 | return (0); | |
1207 | } | |
1208 | ||
1209 | if (object == 0 || object >= DN_MAX_OBJECT) | |
1210 | return (SET_ERROR(EINVAL)); | |
1211 | ||
1212 | mdn = DMU_META_DNODE(os); | |
1213 | ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT); | |
1214 | ||
1215 | DNODE_VERIFY(mdn); | |
1216 | ||
1217 | if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) { | |
1218 | rw_enter(&mdn->dn_struct_rwlock, RW_READER); | |
1219 | drop_struct_lock = TRUE; | |
1220 | } | |
1221 | ||
1222 | blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t)); | |
1223 | |
1224 | db = dbuf_hold(mdn, blk, FTAG); | |
1225 | if (drop_struct_lock) | |
1226 | rw_exit(&mdn->dn_struct_rwlock); | |
1227 | if (db == NULL) { |
1228 | DNODE_STAT_BUMP(dnode_hold_dbuf_hold); | |
1229 | return (SET_ERROR(EIO)); | |
1230 | } | |
1231 | err = dbuf_read(db, NULL, DB_RF_CANFAIL); |
1232 | if (err) { | |
1233 | DNODE_STAT_BUMP(dnode_hold_dbuf_read); | |
1234 | dbuf_rele(db, FTAG); |
1235 | return (err); | |
1236 | } | |
1237 | ||
1238 | ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT); | |
1239 | epb = db->db.db_size >> DNODE_SHIFT; | |
1240 | ||
1241 | idx = object & (epb - 1); |
1242 | dn_block = (dnode_phys_t *)db->db.db_data; | |
1243 | |
1244 | ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE); | |
1245 | dnc = dmu_buf_get_user(&db->db); |
1246 | dnh = NULL; | |
1247 | if (dnc == NULL) { | |
1248 | dnode_children_t *winner; | |
1249 | int skip = 0; |
1250 | ||
1251 | dnc = kmem_zalloc(sizeof (dnode_children_t) + | |
1252 | epb * sizeof (dnode_handle_t), KM_SLEEP); | |
1253 | dnc->dnc_count = epb; |
1254 | dnh = &dnc->dnc_children[0]; | |
1255 | ||
1256 | /* Initialize dnode slot status from dnode_phys_t */ | |
1257 | for (int i = 0; i < epb; i++) { | |
1258 | zrl_init(&dnh[i].dnh_zrlock); | |
1259 | |
1260 | if (skip) { | |
1261 | skip--; | |
1262 | continue; | |
1263 | } | |
1264 | ||
1265 | if (dn_block[i].dn_type != DMU_OT_NONE) { | |
1266 | int interior = dn_block[i].dn_extra_slots; | |
1267 | ||
1268 | dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED); | |
1269 | dnode_set_slots(dnc, i + 1, interior, | |
1270 | DN_SLOT_INTERIOR); | |
1271 | skip = interior; | |
1272 | } else { | |
1273 | dnh[i].dnh_dnode = DN_SLOT_FREE; | |
1274 | skip = 0; | |
1275 | } | |
1276 | } | |
1277 | |
1278 | dmu_buf_init_user(&dnc->dnc_dbu, NULL, | |
1279 | dnode_buf_evict_async, NULL); | |
1280 | winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu); | |
1281 | if (winner != NULL) { |
1282 | ||
1283 | for (int i = 0; i < epb; i++) | |
1284 | zrl_destroy(&dnh[i].dnh_zrlock); | |
1285 | ||
1286 | kmem_free(dnc, sizeof (dnode_children_t) + | |
1287 | epb * sizeof (dnode_handle_t)); | |
1288 | dnc = winner; | |
1289 | } |
1290 | } | |
1291 | ||
1292 | ASSERT(dnc->dnc_count == epb); |
1293 | dn = DN_SLOT_UNINIT; | |
1294 | ||
1295 | if (flag & DNODE_MUST_BE_ALLOCATED) { | |
1296 | slots = 1; | |
1297 | ||
1298 | while (dn == DN_SLOT_UNINIT) { | |
1299 | dnode_slots_hold(dnc, idx, slots); | |
1300 | dnh = &dnc->dnc_children[idx]; | |
1301 | ||
1302 | if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) { | |
1303 | dn = dnh->dnh_dnode; | |
1304 | break; | |
1305 | } else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) { | |
1306 | DNODE_STAT_BUMP(dnode_hold_alloc_interior); | |
1307 | dnode_slots_rele(dnc, idx, slots); | |
1308 | dbuf_rele(db, FTAG); | |
1309 | return (SET_ERROR(EEXIST)); | |
1310 | } else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) { | |
1311 | DNODE_STAT_BUMP(dnode_hold_alloc_misses); | |
1312 | dnode_slots_rele(dnc, idx, slots); | |
1313 | dbuf_rele(db, FTAG); | |
1314 | return (SET_ERROR(ENOENT)); | |
1315 | } | |
1316 | ||
1317 | dnode_slots_rele(dnc, idx, slots); | |
1318 | if (!dnode_slots_tryenter(dnc, idx, slots)) { | |
1319 | DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry); | |
1320 | continue; | |
1321 | } | |
1322 | ||
1323 | /* | |
1324 | * Someone else won the race and called dnode_create() | |
1325 | * after we checked DN_SLOT_IS_PTR() above but before | |
1326 | * we acquired the lock. | |
1327 | */ | |
1328 | if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) { | |
1329 | DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses); | |
1330 | dn = dnh->dnh_dnode; | |
1331 | } else { | |
1332 | dn = dnode_create(os, dn_block + idx, db, | |
1333 | object, dnh); | |
1334 | } | |
1335 | } | |
1336 | ||
1337 | mutex_enter(&dn->dn_mtx); | |
1338 | if (dn->dn_type == DMU_OT_NONE) { | |
1339 | DNODE_STAT_BUMP(dnode_hold_alloc_type_none); | |
1340 | mutex_exit(&dn->dn_mtx); | |
1341 | dnode_slots_rele(dnc, idx, slots); | |
1342 | dbuf_rele(db, FTAG); | |
1343 | return (SET_ERROR(ENOENT)); | |
1344 | } | |
1345 | ||
1346 | DNODE_STAT_BUMP(dnode_hold_alloc_hits); | |
1347 | } else if (flag & DNODE_MUST_BE_FREE) { | |
1348 | ||
1349 | if (idx + slots - 1 >= DNODES_PER_BLOCK) { | |
1350 | DNODE_STAT_BUMP(dnode_hold_free_overflow); | |
1351 | dbuf_rele(db, FTAG); | |
1352 | return (SET_ERROR(ENOSPC)); | |
1353 | } | |
1354 | ||
1355 | while (dn == DN_SLOT_UNINIT) { | |
1356 | dnode_slots_hold(dnc, idx, slots); | |
70e083d2 | 1357 | |
86e3c28a CIK |
1358 | if (!dnode_check_slots(dnc, idx, slots, DN_SLOT_FREE)) { |
1359 | DNODE_STAT_BUMP(dnode_hold_free_misses); | |
1360 | dnode_slots_rele(dnc, idx, slots); | |
1361 | dbuf_rele(db, FTAG); | |
1362 | return (SET_ERROR(ENOSPC)); | |
1363 | } | |
1364 | ||
1365 | dnode_slots_rele(dnc, idx, slots); | |
1366 | if (!dnode_slots_tryenter(dnc, idx, slots)) { | |
1367 | DNODE_STAT_BUMP(dnode_hold_free_lock_retry); | |
1368 | continue; | |
1369 | } | |
1370 | ||
1371 | if (!dnode_check_slots(dnc, idx, slots, DN_SLOT_FREE)) { | |
1372 | DNODE_STAT_BUMP(dnode_hold_free_lock_misses); | |
1373 | dnode_slots_rele(dnc, idx, slots); | |
1374 | dbuf_rele(db, FTAG); | |
1375 | return (SET_ERROR(ENOSPC)); | |
1376 | } | |
1377 | ||
1378 | dnh = &dnc->dnc_children[idx]; | |
1379 | dn = dnode_create(os, dn_block + idx, db, object, dnh); | |
1380 | } | |
1381 | ||
1382 | mutex_enter(&dn->dn_mtx); | |
1383 | if (!refcount_is_zero(&dn->dn_holds)) { | |
1384 | DNODE_STAT_BUMP(dnode_hold_free_refcount); | |
1385 | mutex_exit(&dn->dn_mtx); | |
1386 | dnode_slots_rele(dnc, idx, slots); | |
1387 | dbuf_rele(db, FTAG); | |
1388 | return (SET_ERROR(EEXIST)); | |
1389 | } | |
1390 | ||
1391 | dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR); | |
1392 | DNODE_STAT_BUMP(dnode_hold_free_hits); | |
1393 | } else { | |
1394 | dbuf_rele(db, FTAG); | |
1395 | return (SET_ERROR(EINVAL)); | |
1396 | } |
1397 | ||
1398 | if (dn->dn_free_txg) { |
1399 | DNODE_STAT_BUMP(dnode_hold_free_txg); | |
1400 | type = dn->dn_type; | |
1401 | mutex_exit(&dn->dn_mtx); | |
1402 | dnode_slots_rele(dnc, idx, slots); | |
1403 | dbuf_rele(db, FTAG); |
1404 | return (type == DMU_OT_NONE ? ENOENT : EEXIST); | |
1405 | } | |
1406 | ||
1407 | if (refcount_add(&dn->dn_holds, tag) == 1) |
1408 | dbuf_add_ref(db, dnh); | |
1409 | ||
1410 | mutex_exit(&dn->dn_mtx); |
1411 | ||
1412 | /* Now we can rely on the hold to prevent the dnode from moving. */ | |
1413 | dnode_slots_rele(dnc, idx, slots); | |
1414 | |
1415 | DNODE_VERIFY(dn); | |
1416 | ASSERT3P(dn->dn_dbuf, ==, db); | |
1417 | ASSERT3U(dn->dn_object, ==, object); | |
1418 | dbuf_rele(db, FTAG); | |
1419 | ||
1420 | *dnp = dn; | |
1421 | return (0); | |
1422 | } | |
1423 | ||
1424 | /* | |
1425 | * Return held dnode if the object is allocated, NULL if not. | |
1426 | */ | |
1427 | int | |
1428 | dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp) | |
1429 | { | |
1430 | return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag, |
1431 | dnp)); | |
1432 | } |
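/*
 * Usage sketch (not in the original source): a typical consumer holds an
 * allocated dnode, operates on it, and releases it with the same tag:
 *
 *	dnode_t *dn;
 *	int err = dnode_hold(os, object, FTAG, &dn);
 *	if (err == 0) {
 *		... use dn ...
 *		dnode_rele(dn, FTAG);
 *	}
 *
 * Callers needing a run of free slots (e.g. large-dnode allocation)
 * presumably go through dnode_hold_impl() directly with DNODE_MUST_BE_FREE
 * and slots > 0. As a worked example of the mapping in dnode_hold_impl():
 * with 16K metadnode blocks, epb is 32, so object 100 lives in metadnode
 * block 3 (100 >> 5) at slot idx 4 (100 & 31).
 */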
1433 | ||
1434 | /* | |
1435 | * Can only add a reference if there is already at least one | |
1436 | * reference on the dnode. Returns FALSE if unable to add a | |
1437 | * new reference. | |
1438 | */ | |
1439 | boolean_t | |
1440 | dnode_add_ref(dnode_t *dn, void *tag) | |
1441 | { | |
1442 | mutex_enter(&dn->dn_mtx); | |
1443 | if (refcount_is_zero(&dn->dn_holds)) { | |
1444 | mutex_exit(&dn->dn_mtx); | |
1445 | return (FALSE); | |
1446 | } | |
1447 | VERIFY(1 < refcount_add(&dn->dn_holds, tag)); | |
1448 | mutex_exit(&dn->dn_mtx); | |
1449 | return (TRUE); | |
1450 | } | |
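/*
 * Illustrative note (not in the original source): dnode_add_ref() is the
 * piggyback path; it only succeeds while some other hold already exists,
 * so the usual pattern is to take the extra reference under that
 * guarantee and drop it later with dnode_rele() using the same tag:
 *
 *	if (dnode_add_ref(dn, FTAG)) {
 *		... dn cannot be evicted or moved here ...
 *		dnode_rele(dn, FTAG);
 *	}
 */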
1451 | ||
1452 | void | |
1453 | dnode_rele(dnode_t *dn, void *tag) | |
1454 | { | |
1455 | mutex_enter(&dn->dn_mtx); | |
1456 | dnode_rele_and_unlock(dn, tag); | |
1457 | } | |
1458 | ||
1459 | void | |
1460 | dnode_rele_and_unlock(dnode_t *dn, void *tag) | |
1461 | { | |
1462 | uint64_t refs; | |
1463 | /* Get while the hold prevents the dnode from moving. */ | |
1464 | dmu_buf_impl_t *db = dn->dn_dbuf; | |
1465 | dnode_handle_t *dnh = dn->dn_handle; | |
1466 | ||
1467 | refs = refcount_remove(&dn->dn_holds, tag); | |
1468 | mutex_exit(&dn->dn_mtx); | |
1469 | ||
1470 | /* | |
1471 | * It's unsafe to release the last hold on a dnode by dnode_rele() or | |
1472 | * indirectly by dbuf_rele() while relying on the dnode handle to | |
1473 | * prevent the dnode from moving, since releasing the last hold could | |
1474 | * result in the dnode's parent dbuf evicting its dnode handles. For | |
1475 | * that reason anyone calling dnode_rele() or dbuf_rele() without some | |
1476 | * other direct or indirect hold on the dnode must first drop the dnode | |
1477 | * handle. | |
1478 | */ | |
1479 | ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread); | |
1480 | ||
1481 | /* NOTE: the DNODE_DNODE does not have a dn_dbuf */ | |
1482 | if (refs == 0 && db != NULL) { | |
1483 | /* | |
1484 | * Another thread could add a hold to the dnode handle in | |
1485 | * dnode_hold_impl() while holding the parent dbuf. Since the | |
1486 | * hold on the parent dbuf prevents the handle from being | |
1487 | * destroyed, the hold on the handle is OK. We can't yet assert | |
1488 | * that the handle has zero references, but that will be | |
1489 | * asserted anyway when the handle gets destroyed. | |
1490 | */ | |
1491 | dbuf_rele(db, dnh); | |
1492 | } | |
1493 | } | |
1494 | ||
1495 | void | |
1496 | dnode_setdirty(dnode_t *dn, dmu_tx_t *tx) | |
1497 | { | |
1498 | objset_t *os = dn->dn_objset; | |
1499 | uint64_t txg = tx->tx_txg; | |
1500 | ||
1501 | if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) { | |
1502 | dsl_dataset_dirty(os->os_dsl_dataset, tx); | |
1503 | return; | |
1504 | } | |
1505 | ||
1506 | DNODE_VERIFY(dn); | |
1507 | ||
1508 | #ifdef ZFS_DEBUG | |
1509 | mutex_enter(&dn->dn_mtx); | |
1510 | ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg); | |
1511 | ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg); | |
1512 | mutex_exit(&dn->dn_mtx); | |
1513 | #endif | |
1514 | ||
1515 | /* | |
1516 | * Determine old uid/gid when necessary | |
1517 | */ | |
1518 | dmu_objset_userquota_get_ids(dn, B_TRUE, tx); | |
1519 | ||
86e3c28a CIK |
1520 | multilist_t *dirtylist = os->os_dirty_dnodes[txg & TXG_MASK]; |
1521 | multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn); | |
70e083d2 TG |
1522 | |
1523 | /* | |
1524 | * If we are already marked dirty, we're done. | |
1525 | */ | |
1526 | if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) { | |
86e3c28a | 1527 | multilist_sublist_unlock(mls); |
70e083d2 TG |
1528 | return; |
1529 | } | |
1530 | ||
1531 | ASSERT(!refcount_is_zero(&dn->dn_holds) || | |
1532 | !avl_is_empty(&dn->dn_dbufs)); | |
1533 | ASSERT(dn->dn_datablksz != 0); | |
1534 | ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]); | |
1535 | ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]); | |
1536 | ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]); | |
1537 | ||
1538 | dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n", | |
1539 | dn->dn_object, txg); | |
1540 | ||
86e3c28a | 1541 | multilist_sublist_insert_head(mls, dn); |
70e083d2 | 1542 | |
86e3c28a | 1543 | multilist_sublist_unlock(mls); |
70e083d2 TG |
1544 | |
1545 | /* | |
1546 | * The dnode maintains a hold on its containing dbuf as | |
1547 | * long as there are holds on it. Each instantiated child | |
1548 | * dbuf maintains a hold on the dnode. When the last child | |
1549 | * drops its hold, the dnode will drop its hold on the | |
1550 | * containing dbuf. We add a "dirty hold" here so that the | |
1551 | * dnode will hang around after we finish processing its | |
1552 | * children. | |
1553 | */ | |
1554 | VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg)); | |
1555 | ||
1556 | (void) dbuf_dirty(dn->dn_dbuf, tx); | |
1557 | ||
1558 | dsl_dataset_dirty(os->os_dsl_dataset, tx); | |
1559 | } | |
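/*
 * Editor's note (illustrative, assuming the usual TXG_SIZE of 4): the
 * txg & TXG_MASK indexing above selects one of four per-txg slots, so for
 * example txg 17 uses slot 17 & 3 == 1.  The same masking indexes
 * dn_dirty_link[], dn_next_blksz[] and the other per-txg arrays checked in
 * the ASSERT0()s above.
 */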
1560 | ||
1561 | void | |
1562 | dnode_free(dnode_t *dn, dmu_tx_t *tx) | |
1563 | { | |
70e083d2 TG |
1564 | mutex_enter(&dn->dn_mtx); |
1565 | if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) { | |
1566 | mutex_exit(&dn->dn_mtx); | |
1567 | return; | |
1568 | } | |
1569 | dn->dn_free_txg = tx->tx_txg; | |
1570 | mutex_exit(&dn->dn_mtx); | |
1571 | ||
86e3c28a | 1572 | dnode_setdirty(dn, tx); |
70e083d2 TG |
1573 | } |
1574 | ||
1575 | /* | |
1576 | * Try to change the block size for the indicated dnode. This can only | |
1577 | * succeed if there are no blocks allocated or dirty beyond the first block. |
1578 | */ | |
1579 | int | |
1580 | dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx) | |
1581 | { | |
1582 | dmu_buf_impl_t *db; | |
1583 | int err; | |
1584 | ||
1585 | ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset))); | |
1586 | if (size == 0) | |
1587 | size = SPA_MINBLOCKSIZE; | |
1588 | else | |
1589 | size = P2ROUNDUP(size, SPA_MINBLOCKSIZE); | |
1590 | ||
1591 | if (ibs == dn->dn_indblkshift) | |
1592 | ibs = 0; | |
1593 | ||
1594 | if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0) | |
1595 | return (0); | |
1596 | ||
1597 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
1598 | ||
1599 | /* Check for any allocated blocks beyond the first */ | |
1600 | if (dn->dn_maxblkid != 0) | |
1601 | goto fail; | |
1602 | ||
1603 | mutex_enter(&dn->dn_dbufs_mtx); | |
1604 | for (db = avl_first(&dn->dn_dbufs); db != NULL; | |
1605 | db = AVL_NEXT(&dn->dn_dbufs, db)) { | |
1606 | if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID && | |
1607 | db->db_blkid != DMU_SPILL_BLKID) { | |
1608 | mutex_exit(&dn->dn_dbufs_mtx); | |
1609 | goto fail; | |
1610 | } | |
1611 | } | |
1612 | mutex_exit(&dn->dn_dbufs_mtx); | |
1613 | ||
1614 | if (ibs && dn->dn_nlevels != 1) | |
1615 | goto fail; | |
1616 | ||
1617 | /* resize the old block */ | |
86e3c28a | 1618 | err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db); |
70e083d2 TG |
1619 | if (err == 0) |
1620 | dbuf_new_size(db, size, tx); | |
1621 | else if (err != ENOENT) | |
1622 | goto fail; | |
1623 | ||
1624 | dnode_setdblksz(dn, size); | |
1625 | dnode_setdirty(dn, tx); | |
1626 | dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size; | |
1627 | if (ibs) { | |
1628 | dn->dn_indblkshift = ibs; | |
1629 | dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs; | |
1630 | } | |
1631 | /* rele after we have fixed the blocksize in the dnode */ | |
1632 | if (db) | |
1633 | dbuf_rele(db, FTAG); | |
1634 | ||
1635 | rw_exit(&dn->dn_struct_rwlock); | |
1636 | return (0); | |
1637 | ||
1638 | fail: | |
1639 | rw_exit(&dn->dn_struct_rwlock); | |
1640 | return (SET_ERROR(ENOTSUP)); | |
1641 | } | |
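/*
 * Editor's sketch (values assumed, not from the original source): a request
 * for 4000 bytes is rounded up to P2ROUNDUP(4000, SPA_MINBLOCKSIZE) == 4096,
 * an ibs of 0 keeps the current indirect block shift, and the call fails
 * with ENOTSUP if the object already has blocks past the first one.
 * example_set_blksz() is a hypothetical helper name.
 */
#if 0
static int
example_set_blksz(dnode_t *dn, dmu_tx_t *tx)
{
	/* 4000 becomes 4096; 0 leaves dn_indblkshift unchanged */
	return (dnode_set_blksz(dn, 4000, 0, tx));
}
#endif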
1642 | ||
1643 | /* read-holding callers must not rely on the lock being continuously held */ | |
1644 | void | |
1645 | dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read) | |
1646 | { | |
1647 | uint64_t txgoff = tx->tx_txg & TXG_MASK; | |
1648 | int epbs, new_nlevels; | |
1649 | uint64_t sz; | |
1650 | ||
1651 | ASSERT(blkid != DMU_BONUS_BLKID); | |
1652 | ||
1653 | ASSERT(have_read ? | |
1654 | RW_READ_HELD(&dn->dn_struct_rwlock) : | |
1655 | RW_WRITE_HELD(&dn->dn_struct_rwlock)); | |
1656 | ||
1657 | /* | |
1658 | * if we have a read-lock, check to see if we need to do any work | |
1659 | * before upgrading to a write-lock. | |
1660 | */ | |
1661 | if (have_read) { | |
1662 | if (blkid <= dn->dn_maxblkid) | |
1663 | return; | |
1664 | ||
1665 | if (!rw_tryupgrade(&dn->dn_struct_rwlock)) { | |
1666 | rw_exit(&dn->dn_struct_rwlock); | |
1667 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | if (blkid <= dn->dn_maxblkid) | |
1672 | goto out; | |
1673 | ||
1674 | dn->dn_maxblkid = blkid; | |
1675 | ||
1676 | /* | |
1677 | * Compute the number of levels necessary to support the new maxblkid. | |
1678 | */ | |
1679 | new_nlevels = 1; | |
1680 | epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1681 | for (sz = dn->dn_nblkptr; | |
1682 | sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs) | |
1683 | new_nlevels++; | |
1684 | ||
86e3c28a CIK |
1685 | ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS); |
1686 | ||
70e083d2 TG |
1687 | if (new_nlevels > dn->dn_nlevels) { |
1688 | int old_nlevels = dn->dn_nlevels; | |
1689 | dmu_buf_impl_t *db; | |
1690 | list_t *list; | |
1691 | dbuf_dirty_record_t *new, *dr, *dr_next; | |
1692 | ||
1693 | dn->dn_nlevels = new_nlevels; | |
1694 | ||
1695 | ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]); | |
1696 | dn->dn_next_nlevels[txgoff] = new_nlevels; | |
1697 | ||
1698 | /* dirty the left indirects */ | |
1699 | db = dbuf_hold_level(dn, old_nlevels, 0, FTAG); | |
1700 | ASSERT(db != NULL); | |
1701 | new = dbuf_dirty(db, tx); | |
1702 | dbuf_rele(db, FTAG); | |
1703 | ||
1704 | /* transfer the dirty records to the new indirect */ | |
1705 | mutex_enter(&dn->dn_mtx); | |
1706 | mutex_enter(&new->dt.di.dr_mtx); | |
1707 | list = &dn->dn_dirty_records[txgoff]; | |
1708 | for (dr = list_head(list); dr; dr = dr_next) { | |
1709 | dr_next = list_next(&dn->dn_dirty_records[txgoff], dr); | |
1710 | if (dr->dr_dbuf->db_level != new_nlevels-1 && | |
1711 | dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && | |
1712 | dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { | |
1713 | ASSERT(dr->dr_dbuf->db_level == old_nlevels-1); | |
1714 | list_remove(&dn->dn_dirty_records[txgoff], dr); | |
1715 | list_insert_tail(&new->dt.di.dr_children, dr); | |
1716 | dr->dr_parent = new; | |
1717 | } | |
1718 | } | |
1719 | mutex_exit(&new->dt.di.dr_mtx); | |
1720 | mutex_exit(&dn->dn_mtx); | |
1721 | } | |
1722 | ||
1723 | out: | |
1724 | if (have_read) | |
1725 | rw_downgrade(&dn->dn_struct_rwlock); | |
1726 | } | |
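/*
 * Editor's worked example for the new_nlevels loop above (geometry assumed,
 * not from the original source): with dn_nblkptr == 3 and 128K indirect
 * blocks (dn_indblkshift == 17, so epbs == 17 - SPA_BLKPTRSHIFT == 10), a
 * new maxblkid of 5000 gives
 *	sz = 3       <= 5000 -> new_nlevels = 2, sz <<= 10 (3072)
 *	sz = 3072    <= 5000 -> new_nlevels = 3, sz <<= 10 (3145728)
 *	sz = 3145728 >  5000 -> stop
 * so the dnode grows to three levels of indirection.
 */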
1727 | ||
1728 | static void | |
1729 | dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx) | |
1730 | { | |
1731 | dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG); | |
1732 | if (db != NULL) { | |
1733 | dmu_buf_will_dirty(&db->db, tx); | |
1734 | dbuf_rele(db, FTAG); | |
1735 | } | |
1736 | } | |
1737 | ||
1738 | void | |
1739 | dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx) | |
1740 | { | |
1741 | dmu_buf_impl_t *db; | |
1742 | uint64_t blkoff, blkid, nblks; | |
1743 | int blksz, blkshift, head, tail; | |
1744 | int trunc = FALSE; | |
1745 | int epbs; | |
1746 | ||
1747 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
1748 | blksz = dn->dn_datablksz; | |
1749 | blkshift = dn->dn_datablkshift; | |
1750 | epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1751 | ||
1752 | if (len == DMU_OBJECT_END) { | |
1753 | len = UINT64_MAX - off; | |
1754 | trunc = TRUE; | |
1755 | } | |
1756 | ||
1757 | /* | |
1758 | * First, block align the region to free: | |
1759 | */ | |
1760 | if (ISP2(blksz)) { | |
1761 | head = P2NPHASE(off, blksz); | |
1762 | blkoff = P2PHASE(off, blksz); | |
1763 | if ((off >> blkshift) > dn->dn_maxblkid) | |
1764 | goto out; | |
1765 | } else { | |
1766 | ASSERT(dn->dn_maxblkid == 0); | |
1767 | if (off == 0 && len >= blksz) { | |
1768 | /* | |
1769 | * Freeing the whole block; fast-track this request. | |
1770 | * Note that we won't dirty any indirect blocks, | |
1771 | * which is fine because we will be freeing the entire | |
1772 | * file and thus all indirect blocks will be freed | |
1773 | * by free_children(). | |
1774 | */ | |
1775 | blkid = 0; | |
1776 | nblks = 1; | |
1777 | goto done; | |
1778 | } else if (off >= blksz) { | |
1779 | /* Freeing past end-of-data */ | |
1780 | goto out; | |
1781 | } else { | |
1782 | /* Freeing part of the block. */ | |
1783 | head = blksz - off; | |
1784 | ASSERT3U(head, >, 0); | |
1785 | } | |
1786 | blkoff = off; | |
1787 | } | |
1788 | /* zero out any partial block data at the start of the range */ | |
1789 | if (head) { | |
1790 | ASSERT3U(blkoff + head, ==, blksz); | |
1791 | if (len < head) | |
1792 | head = len; | |
86e3c28a CIK |
1793 | if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off), |
1794 | TRUE, FALSE, FTAG, &db) == 0) { | |
70e083d2 TG |
1795 | caddr_t data; |
1796 | ||
1797 | /* don't dirty if it isn't on disk and isn't dirty */ | |
1798 | if (db->db_last_dirty || | |
1799 | (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) { | |
1800 | rw_exit(&dn->dn_struct_rwlock); | |
1801 | dmu_buf_will_dirty(&db->db, tx); | |
1802 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
1803 | data = db->db.db_data; | |
1804 | bzero(data + blkoff, head); | |
1805 | } | |
1806 | dbuf_rele(db, FTAG); | |
1807 | } | |
1808 | off += head; | |
1809 | len -= head; | |
1810 | } | |
1811 | ||
1812 | /* If the range was less than one block, we're done */ | |
1813 | if (len == 0) | |
1814 | goto out; | |
1815 | ||
1816 | /* If the remaining range is past end of file, we're done */ | |
1817 | if ((off >> blkshift) > dn->dn_maxblkid) | |
1818 | goto out; | |
1819 | ||
1820 | ASSERT(ISP2(blksz)); | |
1821 | if (trunc) | |
1822 | tail = 0; | |
1823 | else | |
1824 | tail = P2PHASE(len, blksz); | |
1825 | ||
1826 | ASSERT0(P2PHASE(off, blksz)); | |
1827 | /* zero out any partial block data at the end of the range */ | |
1828 | if (tail) { | |
1829 | if (len < tail) | |
1830 | tail = len; | |
86e3c28a CIK |
1831 | if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len), |
1832 | TRUE, FALSE, FTAG, &db) == 0) { | |
70e083d2 TG |
1833 | /* don't dirty if not on disk and not dirty */ |
1834 | if (db->db_last_dirty || | |
1835 | (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) { | |
1836 | rw_exit(&dn->dn_struct_rwlock); | |
1837 | dmu_buf_will_dirty(&db->db, tx); | |
1838 | rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
1839 | bzero(db->db.db_data, tail); | |
1840 | } | |
1841 | dbuf_rele(db, FTAG); | |
1842 | } | |
1843 | len -= tail; | |
1844 | } | |
1845 | ||
1846 | /* If the range did not include a full block, we are done */ | |
1847 | if (len == 0) | |
1848 | goto out; | |
1849 | ||
1850 | ASSERT(IS_P2ALIGNED(off, blksz)); | |
1851 | ASSERT(trunc || IS_P2ALIGNED(len, blksz)); | |
1852 | blkid = off >> blkshift; | |
1853 | nblks = len >> blkshift; | |
1854 | if (trunc) | |
1855 | nblks += 1; | |
1856 | ||
1857 | /* | |
1858 | * Dirty all the indirect blocks in this range. Note that only | |
1859 | * the first and last indirect blocks can actually be written | |
1860 | * (if they were partially freed) -- they must be dirtied, even if | |
1861 | * they do not exist on disk yet. The interior blocks will | |
1862 | * be freed by free_children(), so they will not actually be written. | |
1863 | * Even though these interior blocks will not be written, we | |
1864 | * dirty them for two reasons: | |
1865 | * | |
1866 | * - It ensures that the indirect blocks remain in memory until | |
1867 | * syncing context. (They have already been prefetched by | |
1868 | * dmu_tx_hold_free(), so we don't have to worry about reading | |
1869 | * them serially here.) | |
1870 | * | |
1871 | * - The dirty space accounting will put pressure on the txg sync | |
1872 | * mechanism to begin syncing, and to delay transactions if there | |
1873 | * is a large amount of freeing. Even though these indirect | |
1874 | * blocks will not be written, we could need to write the same | |
1875 | * amount of space if we copy the freed BPs into deadlists. | |
1876 | */ | |
1877 | if (dn->dn_nlevels > 1) { | |
1878 | uint64_t first, last, i, ibyte; | |
1879 | int shift, err; | |
1880 | ||
1881 | first = blkid >> epbs; | |
1882 | dnode_dirty_l1(dn, first, tx); | |
1883 | if (trunc) | |
1884 | last = dn->dn_maxblkid >> epbs; | |
1885 | else | |
1886 | last = (blkid + nblks - 1) >> epbs; | |
1887 | if (last != first) | |
1888 | dnode_dirty_l1(dn, last, tx); | |
1889 | ||
1890 | shift = dn->dn_datablkshift + dn->dn_indblkshift - | |
1891 | SPA_BLKPTRSHIFT; | |
1892 | for (i = first + 1; i < last; i++) { | |
1893 | /* | |
1894 | * Set i to the blockid of the next non-hole | |
1895 | * level-1 indirect block at or after i. Note | |
1896 | * that dnode_next_offset() operates in terms of | |
1897 | * level-0-equivalent bytes. | |
1898 | */ | |
1899 | ibyte = i << shift; | |
1900 | err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK, | |
1901 | &ibyte, 2, 1, 0); | |
1902 | i = ibyte >> shift; | |
1903 | if (i >= last) | |
1904 | break; | |
1905 | ||
1906 | /* | |
1907 | * Normally we should not see an error, either | |
1908 | * from dnode_next_offset() or dbuf_hold_level() | |
1909 | * (except for ESRCH from dnode_next_offset). | |
1910 | * If there is an i/o error, then when we read | |
1911 | * this block in syncing context, it will use | |
1912 | * ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according | |
1913 | * to the "failmode" property. dnode_next_offset() | |
1914 | * doesn't have a flag to indicate MUSTSUCCEED. | |
1915 | */ | |
1916 | if (err != 0) | |
1917 | break; | |
1918 | ||
1919 | dnode_dirty_l1(dn, i, tx); | |
1920 | } | |
1921 | } | |
1922 | ||
1923 | done: | |
1924 | /* | |
1925 | * Add this range to the dnode range list. | |
1926 | * We will finish up this free operation in the syncing phase. | |
1927 | */ | |
1928 | mutex_enter(&dn->dn_mtx); | |
1929 | { | |
1930 | int txgoff = tx->tx_txg & TXG_MASK; | |
1931 | if (dn->dn_free_ranges[txgoff] == NULL) { | |
1932 | dn->dn_free_ranges[txgoff] = | |
1933 | range_tree_create(NULL, NULL, &dn->dn_mtx); | |
1934 | } | |
1935 | range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks); | |
1936 | range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks); | |
1937 | } | |
1938 | dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n", | |
1939 | blkid, nblks, tx->tx_txg); | |
1940 | mutex_exit(&dn->dn_mtx); | |
1941 | ||
1942 | dbuf_free_range(dn, blkid, blkid + nblks - 1, tx); | |
1943 | dnode_setdirty(dn, tx); | |
1944 | out: | |
1945 | ||
1946 | rw_exit(&dn->dn_struct_rwlock); | |
1947 | } | |
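/*
 * Editor's worked example for the alignment logic above (values assumed,
 * not from the original source): freeing off = 64K, len = 256K from an
 * object with 128K blocks and dn_maxblkid >= 2:
 *	head = 64K -> bytes [64K, 128K) of block 0 are zeroed in place
 *	tail = 64K -> bytes [256K, 320K) of block 2 are zeroed in place
 *	blkid = 1, nblks = 1 -> block 1 ([128K, 256K)) is freed outright
 *	  via the free range recorded for this txg.
 */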
1948 | ||
1949 | static boolean_t | |
1950 | dnode_spill_freed(dnode_t *dn) | |
1951 | { | |
1952 | int i; | |
1953 | ||
1954 | mutex_enter(&dn->dn_mtx); | |
1955 | for (i = 0; i < TXG_SIZE; i++) { | |
1956 | if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK) | |
1957 | break; | |
1958 | } | |
1959 | mutex_exit(&dn->dn_mtx); | |
1960 | return (i < TXG_SIZE); | |
1961 | } | |
1962 | ||
1963 | /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */ | |
1964 | uint64_t | |
1965 | dnode_block_freed(dnode_t *dn, uint64_t blkid) | |
1966 | { | |
1967 | void *dp = spa_get_dsl(dn->dn_objset->os_spa); | |
1968 | int i; | |
1969 | ||
1970 | if (blkid == DMU_BONUS_BLKID) | |
1971 | return (FALSE); | |
1972 | ||
1973 | /* | |
1974 | * If we're in the process of opening the pool, dp will not be | |
1975 | * set yet, but there shouldn't be anything dirty. | |
1976 | */ | |
1977 | if (dp == NULL) | |
1978 | return (FALSE); | |
1979 | ||
1980 | if (dn->dn_free_txg) | |
1981 | return (TRUE); | |
1982 | ||
1983 | if (blkid == DMU_SPILL_BLKID) | |
1984 | return (dnode_spill_freed(dn)); | |
1985 | ||
1986 | mutex_enter(&dn->dn_mtx); | |
1987 | for (i = 0; i < TXG_SIZE; i++) { | |
1988 | if (dn->dn_free_ranges[i] != NULL && | |
1989 | range_tree_contains(dn->dn_free_ranges[i], blkid, 1)) | |
1990 | break; | |
1991 | } | |
1992 | mutex_exit(&dn->dn_mtx); | |
1993 | return (i < TXG_SIZE); | |
1994 | } | |
1995 | ||
1996 | /* call from syncing context when we actually write/free space for this dnode */ | |
1997 | void | |
1998 | dnode_diduse_space(dnode_t *dn, int64_t delta) | |
1999 | { | |
2000 | uint64_t space; | |
2001 | dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n", | |
2002 | dn, dn->dn_phys, | |
2003 | (u_longlong_t)dn->dn_phys->dn_used, | |
2004 | (longlong_t)delta); | |
2005 | ||
2006 | mutex_enter(&dn->dn_mtx); | |
2007 | space = DN_USED_BYTES(dn->dn_phys); | |
2008 | if (delta > 0) { | |
2009 | ASSERT3U(space + delta, >=, space); /* no overflow */ | |
2010 | } else { | |
2011 | ASSERT3U(space, >=, -delta); /* no underflow */ | |
2012 | } | |
2013 | space += delta; | |
2014 | if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) { | |
2015 | ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0); | |
2016 | ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT)); | |
2017 | dn->dn_phys->dn_used = space >> DEV_BSHIFT; | |
2018 | } else { | |
2019 | dn->dn_phys->dn_used = space; | |
2020 | dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES; | |
2021 | } | |
2022 | mutex_exit(&dn->dn_mtx); | |
2023 | } | |
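/*
 * Editor's note (illustrative): on pools older than SPA_VERSION_DNODE_BYTES
 * the used space is kept in 512-byte sectors (DEV_BSHIFT == 9 is assumed
 * here), so e.g. 8192 charged bytes are recorded as dn_used == 16; newer
 * pools store the byte count directly and set DNODE_FLAG_USED_BYTES.
 */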
2024 | ||
70e083d2 TG |
2025 | /* |
2026 | * Scans a block at the indicated "level" looking for a hole or data, | |
2027 | * depending on 'flags'. | |
2028 | * | |
2029 | * If level > 0, then we are scanning an indirect block looking at its | |
2030 | * pointers. If level == 0, then we are looking at a block of dnodes. | |
2031 | * | |
2032 | * If we don't find what we are looking for in the block, we return ESRCH. | |
2033 | * Otherwise, return with *offset pointing to the beginning (if searching | |
2034 | * forwards) or end (if searching backwards) of the range covered by the | |
2035 | * block pointer we matched on (or dnode). | |
2036 | * | |
2037 | * The basic search algorithm used below by dnode_next_offset() is to | |
2038 | * use this function to search up the block tree (widen the search) until | |
2039 | * we find something (i.e., we don't return ESRCH) and then search back | |
2040 | * down the tree (narrow the search) until we reach our original search | |
2041 | * level. | |
2042 | */ | |
2043 | static int | |
2044 | dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset, | |
86e3c28a | 2045 | int lvl, uint64_t blkfill, uint64_t txg) |
70e083d2 TG |
2046 | { |
2047 | dmu_buf_impl_t *db = NULL; | |
2048 | void *data = NULL; | |
2049 | uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
2050 | uint64_t epb = 1ULL << epbs; | |
2051 | uint64_t minfill, maxfill; | |
2052 | boolean_t hole; | |
2053 | int i, inc, error, span; | |
2054 | ||
70e083d2 TG |
2055 | hole = ((flags & DNODE_FIND_HOLE) != 0); |
2056 | inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1; | |
2057 | ASSERT(txg == 0 || !hole); | |
2058 | ||
2059 | if (lvl == dn->dn_phys->dn_nlevels) { | |
2060 | error = 0; | |
2061 | epb = dn->dn_phys->dn_nblkptr; | |
2062 | data = dn->dn_phys->dn_blkptr; | |
2063 | } else { | |
86e3c28a CIK |
2064 | uint64_t blkid = dbuf_whichblock(dn, lvl, *offset); |
2065 | error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db); | |
70e083d2 TG |
2066 | if (error) { |
2067 | if (error != ENOENT) | |
2068 | return (error); | |
2069 | if (hole) | |
2070 | return (0); | |
2071 | /* | |
2072 | * This can only happen when we are searching up | |
2073 | * the block tree for data. We don't really need to | |
2074 | * adjust the offset, as we will just end up looking | |
2075 | * at the pointer to this block in its parent, and it's | |
2076 | * going to be unallocated, so we will skip over it. | |
2077 | */ | |
2078 | return (SET_ERROR(ESRCH)); | |
2079 | } | |
2080 | error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT); | |
2081 | if (error) { | |
2082 | dbuf_rele(db, FTAG); | |
2083 | return (error); | |
2084 | } | |
2085 | data = db->db.db_data; | |
2086 | } | |
2087 | ||
2088 | ||
2089 | if (db != NULL && txg != 0 && (db->db_blkptr == NULL || | |
2090 | db->db_blkptr->blk_birth <= txg || | |
2091 | BP_IS_HOLE(db->db_blkptr))) { | |
2092 | /* | |
2093 | * This can only happen when we are searching up the tree | |
2094 | * and these conditions mean that we need to keep climbing. | |
2095 | */ | |
2096 | error = SET_ERROR(ESRCH); | |
2097 | } else if (lvl == 0) { | |
2098 | dnode_phys_t *dnp = data; | |
86e3c28a | 2099 | |
70e083d2 | 2100 | ASSERT(dn->dn_type == DMU_OT_DNODE); |
86e3c28a | 2101 | ASSERT(!(flags & DNODE_FIND_BACKWARDS)); |
70e083d2 | 2102 | |
86e3c28a CIK |
2103 | for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1); |
2104 | i < blkfill; i += dnp[i].dn_extra_slots + 1) { | |
70e083d2 TG |
2105 | if ((dnp[i].dn_type == DMU_OT_NONE) == hole) |
2106 | break; | |
70e083d2 | 2107 | } |
86e3c28a CIK |
2108 | |
2109 | if (i == blkfill) | |
70e083d2 | 2110 | error = SET_ERROR(ESRCH); |
86e3c28a CIK |
2111 | |
2112 | *offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) + | |
2113 | (i << DNODE_SHIFT); | |
70e083d2 TG |
2114 | } else { |
2115 | blkptr_t *bp = data; | |
2116 | uint64_t start = *offset; | |
2117 | span = (lvl - 1) * epbs + dn->dn_datablkshift; | |
2118 | minfill = 0; | |
2119 | maxfill = blkfill << ((lvl - 1) * epbs); | |
2120 | ||
2121 | if (hole) | |
2122 | maxfill--; | |
2123 | else | |
2124 | minfill++; | |
2125 | ||
86e3c28a CIK |
2126 | if (span >= 8 * sizeof (*offset)) { |
2127 | /* This only happens on the highest indirection level */ | |
2128 | ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1); | |
2129 | *offset = 0; | |
2130 | } else { | |
2131 | *offset = *offset >> span; | |
2132 | } | |
2133 | ||
70e083d2 TG |
2134 | for (i = BF64_GET(*offset, 0, epbs); |
2135 | i >= 0 && i < epb; i += inc) { | |
2136 | if (BP_GET_FILL(&bp[i]) >= minfill && | |
2137 | BP_GET_FILL(&bp[i]) <= maxfill && | |
2138 | (hole || bp[i].blk_birth > txg)) | |
2139 | break; | |
2140 | if (inc > 0 || *offset > 0) | |
2141 | *offset += inc; | |
2142 | } | |
86e3c28a CIK |
2143 | |
2144 | if (span >= 8 * sizeof (*offset)) { | |
2145 | *offset = start; | |
2146 | } else { | |
2147 | *offset = *offset << span; | |
2148 | } | |
2149 | ||
70e083d2 TG |
2150 | if (inc < 0) { |
2151 | /* traversing backwards; position offset at the end */ | |
2152 | ASSERT3U(*offset, <=, start); | |
2153 | *offset = MIN(*offset + (1ULL << span) - 1, start); | |
2154 | } else if (*offset < start) { | |
2155 | *offset = start; | |
2156 | } | |
2157 | if (i < 0 || i >= epb) | |
2158 | error = SET_ERROR(ESRCH); | |
2159 | } | |
2160 | ||
2161 | if (db) | |
2162 | dbuf_rele(db, FTAG); | |
2163 | ||
2164 | return (error); | |
2165 | } | |
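/*
 * Editor's worked example for the "span" computation above (geometry
 * assumed, not from the original source): with 128K data blocks
 * (dn_datablkshift == 17) and epbs == 10, a level-1 scan uses
 * span == (1 - 1) * 10 + 17 == 17, so each block pointer examined covers
 * 128K of file offset; at level 2 the span is 27, i.e. 128M per pointer.
 */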
2166 | ||
2167 | /* | |
2168 | * Find the next hole, data, or sparse region at or after *offset. | |
2169 | * The value 'blkfill' tells us how many items we expect to find | |
2170 | * in an L0 data block; this value is 1 for normal objects, | |
2171 | * DNODES_PER_BLOCK for the meta dnode, and some fraction of | |
2172 | * DNODES_PER_BLOCK when searching for sparse regions thereof. | |
2173 | * | |
2174 | * Examples: | |
2175 | * | |
2176 | * dnode_next_offset(dn, flags, offset, 1, 1, 0); | |
2177 | * Finds the next/previous hole/data in a file. | |
2178 | * Used in dmu_offset_next(). | |
2179 | * | |
2180 | * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg); | |
2181 | * Finds the next free/allocated dnode in an objset's meta-dnode. | |
2182 | * Only finds objects that have new contents since txg (i.e. | |
2183 | * bonus buffer changes and content removal are ignored). | |
2184 | * Used in dmu_object_next(). | |
2185 | * | |
2186 | * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0); | |
2187 | * Finds the next L2 meta-dnode bp that's at most 1/4 full. | |
2188 | * Used in dmu_object_alloc(). | |
2189 | */ | |
2190 | int | |
2191 | dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset, | |
2192 | int minlvl, uint64_t blkfill, uint64_t txg) | |
2193 | { | |
2194 | uint64_t initial_offset = *offset; | |
2195 | int lvl, maxlvl; | |
2196 | int error = 0; | |
2197 | ||
2198 | if (!(flags & DNODE_FIND_HAVELOCK)) | |
2199 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
2200 | ||
2201 | if (dn->dn_phys->dn_nlevels == 0) { | |
2202 | error = SET_ERROR(ESRCH); | |
2203 | goto out; | |
2204 | } | |
2205 | ||
2206 | if (dn->dn_datablkshift == 0) { | |
2207 | if (*offset < dn->dn_datablksz) { | |
2208 | if (flags & DNODE_FIND_HOLE) | |
2209 | *offset = dn->dn_datablksz; | |
2210 | } else { | |
2211 | error = SET_ERROR(ESRCH); | |
2212 | } | |
2213 | goto out; | |
2214 | } | |
2215 | ||
2216 | maxlvl = dn->dn_phys->dn_nlevels; | |
2217 | ||
2218 | for (lvl = minlvl; lvl <= maxlvl; lvl++) { | |
2219 | error = dnode_next_offset_level(dn, | |
2220 | flags, offset, lvl, blkfill, txg); | |
2221 | if (error != ESRCH) | |
2222 | break; | |
2223 | } | |
2224 | ||
2225 | while (error == 0 && --lvl >= minlvl) { | |
2226 | error = dnode_next_offset_level(dn, | |
2227 | flags, offset, lvl, blkfill, txg); | |
2228 | } | |
2229 | ||
2230 | /* | |
2231 | * There's always a "virtual hole" at the end of the object, even | |
2232 | * if all BP's which physically exist are non-holes. | |
2233 | */ | |
2234 | if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 && | |
2235 | minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) { | |
2236 | error = 0; | |
2237 | } | |
2238 | ||
2239 | if (error == 0 && (flags & DNODE_FIND_BACKWARDS ? | |
2240 | initial_offset < *offset : initial_offset > *offset)) | |
2241 | error = SET_ERROR(ESRCH); | |
2242 | out: | |
2243 | if (!(flags & DNODE_FIND_HAVELOCK)) | |
2244 | rw_exit(&dn->dn_struct_rwlock); | |
2245 | ||
2246 | return (error); | |
2247 | } |
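/*
 * Editor's sketch (not part of the original source): finding the next data
 * region at or after *off in a plain file object, in the style of the first
 * example in the comment above.  example_next_data() is a hypothetical
 * helper name.
 */
#if 0
static int
example_next_data(dnode_t *dn, uint64_t *off)
{
	/* pass DNODE_FIND_HOLE in flags to search for a hole instead */
	return (dnode_next_offset(dn, 0, off, 1, 1, 0));
}
#endif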