/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
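
/*
 * Illustrative note (not part of the original file): per-txg dirty state
 * such as dn_next_nlevels[] is kept in small fixed-size arrays indexed by
 * "txg & TXG_MASK".  TXG_MASK is TXG_SIZE - 1, and with TXG_SIZE == 4 the
 * arrays behave as a ring buffer over the txgs that can be in flight at
 * once, e.g. txg 37 maps to slot 37 & 3 == 1.  A hypothetical accessor
 * might look like:
 *
 *	static int
 *	dnode_next_nlevels_for_txg(dnode_t *dn, uint64_t txg)
 *	{
 *		return (dn->dn_next_nlevels[txg & TXG_MASK]);
 *	}
 */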
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level.  Retaining the birth time enables detection of
		 * when holes are punched, reducing the number of free
		 * records transmitted during a zfs send.
		 */
		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
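
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * punched hole recorded with SPA_FEATURE_HOLE_BIRTH active is a zeroed
 * blkptr_t that keeps its logical size, type, level, and a birth txg.
 * An incremental send from "fromtxg" can then skip holes that already
 * existed in the earlier snapshot, e.g.:
 *
 *	static boolean_t
 *	hole_needs_free_record(const blkptr_t *bp, uint64_t fromtxg)
 *	{
 *		return (BP_IS_HOLE(bp) && bp->blk_birth > fromtxg);
 *	}
 */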
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#define	FREE_VERIFY(db, start, end, tx) \
	free_verify(db, start, end, tx)
#else
#define	FREE_VERIFY(db, start, end, tx)
#endif
/*
 * We don't usually free the indirect blocks here.  If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times.  Therefore, we don't free any indirect
 * blocks in free_children().  If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready, which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed.  Therefore, we free the indirect blocks immediately in
 * that case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write().  If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block).  Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty.  E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed.  The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}
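
/*
 * Worked example (illustrative, not part of the original file): with
 * 128K indirect blocks and 128-byte block pointers,
 * epbs = dn_indblkshift - SPA_BLKPTRSHIFT = 17 - 7 = 10, so each
 * indirect block holds 1024 BPs.  For an L2 block (db_level == 2),
 * shift = (2 - 1) * 10, so L0 blkid 5000 is covered by the L1 block
 * with id 5000 >> 10 == 4, and an L2 block with id N covers L1 ids
 * N << 10 through ((N + 1) << 10) - 1 (exactly dbstart..dbend above).
 */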
/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive.  The raw receive sets the maxblkid manually and
	 * must not be overridden.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		ASSERTV(uint64_t off);
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		ASSERTV(off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT));
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
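
/*
 * Note (an interpretation, not from the original file): the final ASSERT
 * above verifies that nothing remains allocated past the truncated
 * dn_maxblkid; dnode_next_offset() searches forward from "off" and
 * returns nonzero (ESRCH) when no allocated block exists at or after
 * that offset.
 */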
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}
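
/*
 * Illustrative usage sketch (mirrors the call in dnode_sync() below):
 * the arg struct bundles everything the callback needs, because
 * range_tree_vacate() only passes (arg, start, size) to it, and dn_mtx
 * is dropped around the impl call since the free work takes other locks:
 *
 *	dnode_sync_free_range_arg_t dsfra;
 *	dsfra.dsfra_dnode = dn;
 *	dsfra.dsfra_tx = tx;
 *	dsfra.dsfra_free_indirects = freeing_dnode;
 *	mutex_enter(&dn->dn_mtx);
 *	range_tree_vacate(dn->dn_free_ranges[txgoff],
 *	    dnode_sync_free_range, &dsfra);
 *	mutex_exit(&dn->dn_mtx);
 */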
/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t *db_marker;
	dmu_buf_impl_t *db, *db_next;

	db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker->db_level = db->db_level;
			db_marker->db_blkid = db->db_blkid;
			db_marker->db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs.  The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	  dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
			avl_remove(&dn->dn_dbufs, db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	kmem_free(db_marker, sizeof (dmu_buf_impl_t));

	dnode_evict_bonus(dn);
}
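
/*
 * Illustrative sketch (not part of the original file): the marker pattern
 * above generalizes to any AVL walk whose visit step may remove an
 * arbitrary set of nodes, including the current one:
 *
 *	marker->db_state = DB_SEARCH;		   (position-only compare)
 *	avl_insert_here(tree, marker, db, AVL_BEFORE);
 *	visit(db);				   (may remove db and more)
 *	db_next = AVL_NEXT(tree, marker);	   (marker is still valid)
 *	avl_remove(tree, marker);
 */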
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}
/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	ASSERTV(static const dnode_phys_t zerodn = { 0 });
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] =
		    B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection().
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid = dn->dn_next_maxblkid[txgoff];
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
		} else {
			int i;

			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = 0; i < dnp->dn_nblkptr; i++) {
				if (i >= dn->dn_next_nblkptr[txgoff])
					ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
			}
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}