/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_send.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

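/*
 * Grow the dnode's indirection to the level recorded in dn_next_nlevels
 * for this txg: hold a dbuf for the new top-level indirect block, copy
 * the dnode's block pointers into it, and reparent any cached child
 * dbufs onto the new buffer.
 */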
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;

	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

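/*
 * Return the blocks in bp[0..num-1] to the dataset, charging the freed
 * space against the dnode.  When the hole_birth feature is active, the
 * zeroed block pointers keep their logical size, type, level, and birth
 * txg so that zfs send can detect when the holes were punched.
 */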
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;
	int i;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		uint64_t lsize, lvl;
		dmu_object_type_t type;

		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */
		lsize = BP_GET_LSIZE(bp);
		type = BP_GET_TYPE(bp);
		lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

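/*
 * Debug-only consistency check: for each level-0 child in the freed
 * range [start, end] of this level-1 indirect block, panic if the
 * freed buffer still contains nonzero data.
 */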
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#define	FREE_VERIFY(db, start, end, tx) \
	free_verify(db, start, end, tx)
#else
#define	FREE_VERIFY(db, start, end, tx)
#endif

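/*
 * Free the blocks in the range [blkid, blkid + nblks) that this
 * indirect block covers, recursing into lower-level indirect blocks.
 * If the block ends up containing nothing but holes, free it as well.
 */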
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		uint64_t id;

		for (id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/*
		 * We only found holes. Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		bzero(db->db.db_data, db->db.db_size);
		rw_exit(&dn->dn_struct_rwlock);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;
		int i;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		ASSERTV(uint64_t off);
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		ASSERTV(off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT));
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

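/*
 * Glue between range_tree_vacate() and dnode_sync_free_range_impl():
 * the arg carries the dnode and tx into the callback, which must drop
 * dn_mtx (held by the caller) around the actual free.
 */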
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}

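/*
 * Evict the bonus buffer if nothing holds it; otherwise mark it so it
 * is destroyed when its last hold is released.
 */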
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}

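/*
 * Detach every dirty record on the list from its dbuf and free it,
 * recursing through the children of indirect dbufs, and drop the hold
 * that each dirty record had on its dbuf.
 */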
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}

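/*
 * Final syncing-context step in freeing a dnode: undirty and evict its
 * dbufs, zero its on-disk slots, reset the in-core state, and drop the
 * hold that was taken when the dnode was dirtied.
 */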
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

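/*
 * dnode_sync() is the syncing-context entry point for a dirty dnode:
 * it folds the open-context dn_next_* values for this txg into dn_phys,
 * processes any freed ranges, and then hands the dirty records to
 * dbuf_sync_list().
 */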
/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	boolean_t kill_spill = B_FALSE;
	boolean_t freeing_dnode;
	ASSERTV(static const dnode_phys_t zerodn = { 0 });

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	freeing_dnode = dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] =
		    B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;

			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}