/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_send.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
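
/*
 * Increase the dnode's on-disk indirection level, as recorded in
 * dn_next_nlevels[] for this txg: a new top-level indirect block takes
 * over the dnode's block pointers, and any cached child dbufs are
 * reparented onto it.
 */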
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
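
/*
 * Free the range of block pointers [bp, bp + num), returning the space
 * to the dataset and turning each freed blkptr into a hole.  When the
 * hole_birth feature is active, the hole keeps the logical size, type,
 * level, and birth txg so that zfs send can tell new holes from old ones.
 */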
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */
		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
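
/*
 * Debug-only sanity check used via FREE_VERIFY: every level-0 block
 * under the freed region of this level-1 indirect block must already
 * be zeroed, both in its dirty record for this txg and in db_data.
 */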
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, j, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}

#define	FREE_VERIFY(db, start, end, tx) \
	free_verify(db, start, end, tx)
#else
#define	FREE_VERIFY(db, start, end, tx)
#endif
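
/*
 * Recursively free the blocks in the range [blkid, blkid + nblks)
 * below this indirect block, then free the block itself if it is now
 * entirely holes; otherwise it stays dirty so the partial free is
 * written out.
 */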
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/*
		 * We only found holes. Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		bzero(db->db.db_data, db->db.db_size);
		rw_exit(&dn->dn_struct_rwlock);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}
/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		ASSERTV(uint64_t off);
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		ASSERTV(off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT));
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
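
/*
 * Argument bundle handed to dnode_sync_free_range() by
 * range_tree_vacate(); the callback must drop dn_mtx (held by the
 * range tree walk) around the actual free and retake it afterwards.
 */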
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}
/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs.  The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}
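
/*
 * Evict the bonus buffer as well: destroy it immediately if it has no
 * holds, otherwise mark it for eviction once the last hold is dropped.
 */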
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
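
/*
 * Walk a list of dirty records and discard them, releasing the dirty
 * hold that each record has on its dbuf; indirect records recurse into
 * their children first.
 */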
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}
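
/*
 * Called from dnode_sync() when the dnode is being freed in this txg:
 * undirty and evict everything, zero the on-disk dnode, reset the
 * in-core state, and drop the hold taken when the dnode was dirtied.
 */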
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}
/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	ASSERTV(static const dnode_phys_t zerodn = { 0 });
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] =
		    B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection().
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid = dn->dn_next_maxblkid[txgoff];
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
		} else {
#ifdef ZFS_DEBUG
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}