/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
struct dbuf_hold_impl_data {
	/* Function arguments */
	dnode_t *dh_dn;
	uint8_t dh_level;
	uint64_t dh_blkid;
	int dh_fail_sparse;
	void *dh_tag;
	dmu_buf_impl_t **dh_dbp;

	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	blkptr_t *dh_bp;
	int dh_err;
	dbuf_dirty_record_t *dh_dr;
	arc_buf_contents_t dh_type;

	int dh_depth;
};
static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);

static void dbuf_destroy(dmu_buf_impl_t *db);
static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	list_link_init(&db->db_link);
	return (0);
}
/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}
/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}
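/*
 * Illustrative note: the hash folds the objset pointer, object number,
 * level, and block id through the ZFS CRC-64 table one byte at a time,
 * then XORs in the higher-order bits that the byte-wide folding did not
 * reach.  Only the low bits are ultimately used, since callers compute
 * the bucket index as (hash & hash_table_mask).
 */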
#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv, idx;
	dmu_buf_impl_t *db;

	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}
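/*
 * Note for callers (a summary, not new behavior): on success dbuf_find()
 * returns with db_mtx held, so callers such as dbuf_prefetch() and
 * __dbuf_hold_impl() below must either drop db_mtx themselves or hand it
 * off via dbuf_rele_and_unlock().
 */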
/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;

	blkid = db->db_blkid;
	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}
/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}
static arc_evict_func_t dbuf_do_evict;
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = dmu_ot[DB_DNODE(db)->dn_type].ot_metadata;
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}
void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}
void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}
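/*
 * Worked example of the sizing loop above (illustrative): on a machine
 * with 4 GB of physical memory, hsize doubles until hsize * 4096 >= 4 GB,
 * i.e. hsize == 1M buckets.  With 8-byte pointers the table is then 8 MB,
 * matching the "2MB/GB" estimate in the comment above when every 4K block
 * has a dbuf.
 */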
void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
}
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !list_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, 0);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}
	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			ASSERTV(uint64_t *buf = db->db.db_data);
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}
/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa;

		mutex_exit(&db->db_mtx);
		DB_GET_SPA(&spa, db);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
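/*
 * Example (illustrative): for an object with a 128K data block size,
 * dn_datablkshift == 17, so offset 1M maps to block id (1 << 20) >> 17
 * == 8.  Objects small enough to fit in a single (possibly odd-sized)
 * block have dn_datablkshift == 0 and always map to block 0.
 */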
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	spa_t *spa;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;
	arc_buf_t *pbuf;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
		    db->db.db_size, db, type));
		DB_DNODE_EXIT(db);
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	spa = dn->dn_objset->os_spa;
	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);
	/* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */

	if (db->db_parent)
		pbuf = db->db_parent->db_buf;
	else
		pbuf = db->db_objset->os_phys_buf;

	(void) dsl_read(zio, spa, db->db_blkptr, pbuf,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	int havepzio = (zio != NULL);
	int prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (EIO);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = EIO;
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}
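/*
 * Summary of the dispatch above (for reference): DB_CACHED returns
 * immediately, DB_UNCACHED issues the I/O through dbuf_read_impl()
 * (waiting on a root zio only when the caller did not supply one), and
 * any other state (DB_READ/DB_FILL) waits on db_changed unless
 * DB_RF_NEVERWAIT was passed.
 */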
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		DB_GET_SPA(&spa, db);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}
/*
 * This is our just-in-time copy function.  It makes a copy of
 * buffers that have been modified in a previous transaction
 * group, before we modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer, we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp)) {
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		zio_free(spa, txg, bp);
	}
	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}
/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t first_l1 = start >> epbs;
	uint64_t last_l1 = end >> epbs;

	if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
		end = dn->dn_maxblkid;
		last_l1 = end >> epbs;
	}
	dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level == 1 &&
		    db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
			mutex_enter(&db->db_mtx);
			if (db->db_last_dirty &&
			    db->db_last_dirty->dr_txg < txg) {
				dbuf_add_ref(db, FTAG);
				mutex_exit(&db->db_mtx);
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			} else {
				mutex_exit(&db->db_mtx);
			}
		}

		if (db->db_level != 0)
			continue;
		dprintf_dbuf(db, "found buf %s\n", "");
		if (db->db_blkid < start || db->db_blkid > end)
			continue;

		/* found a level 0 buffer in the range */
		if (dbuf_undirty(db, tx))
			continue;

		mutex_enter(&db->db_mtx);
		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}
static boolean_t
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty)
		birth_txg = db->db_last_dirty->dr_txg;
	else if (db->db_blkptr)
		birth_txg = db->db_blkptr->blk_birth;

	/*
	 * If we don't exist or are in a snapshot, we can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (B_FALSE);
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dbuf_will_dirty(db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db) == 1);
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os;
	zbookmark_t zb;

	DB_GET_OBJSET(&os, db);
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	zb.zb_objset = os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;
	(void) arc_release_bp(db->db_buf, db,
	    db->db_blkptr, os->os_spa, &zb);
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	list_link_init(&dr->dr_dirty_node);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dnode_clear_range(dn, db->db_blkid, 1, tx);
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/* possible race with dbuf_undirty() */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}
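/*
 * Note on the dirty-record list (illustrative): db_last_dirty is kept
 * sorted from newest to oldest txg, so a dbuf dirtied in txgs 7 and 8
 * has dr(8) -> dr(7) chained via dr_next.  That ordering is why the
 * loop above skips records with dr_txg > tx->tx_txg before deciding
 * whether this txg is already dirty.
 */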
static int
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	mutex_enter(&db->db_mtx);
	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg) {
		mutex_exit(&db->db_mtx);
		return (0);
	}
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/*
	 * If this buffer is currently held, we cannot undirty
	 * it, since one of the current holders may be in the
	 * middle of an update.  Note that users of dbuf_undirty()
	 * should not place a hold on the dbuf before the call.
	 */
	if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		/* Make sure we don't toss this buffer at sync phase */
		mutex_enter(&dn->dn_mtx);
		dnode_clear_range(dn, db->db_blkid, 1, tx);
		mutex_exit(&dn->dn_mtx);
		DB_DNODE_EXIT(db);
		return (0);
	}

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/* XXX would be nice to fix up dn_towrite_space[] */

	*drp = dr->dr_next;

	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_level == 0) {
		if (db->db_state != DB_NOFILL) {
			dbuf_unoverride(dr);

			ASSERT(db->db_buf != NULL);
			ASSERT(dr->dt.dl.dr_data != NULL);
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
		}
	} else {
		ASSERT(db->db_buf != NULL);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		dbuf_evict(db);
		return (1);
	}

	mutex_exit(&db->db_mtx);
	return (0);
}
#pragma weak dmu_buf_will_dirty = dbuf_will_dirty
void
dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}
void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}
#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}
/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dbuf_fill_done(db, tx);
}
1533 * "Clear" the contents of this dbuf. This will mark the dbuf
1534 * EVICTING and clear *most* of its references. Unfortunetely,
1535 * when we are not holding the dn_dbufs_mtx, we can't clear the
1536 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1537 * in this case. For callers from the DMU we will usually see:
1538 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1539 * For the arc callback, we will usually see:
1540 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1541 * Sometimes, though, we will get a mix of these two:
1542 * DMU: dbuf_clear()->arc_buf_evict()
1543 * ARC: dbuf_do_evict()->dbuf_destroy()
1546 dbuf_clear(dmu_buf_impl_t
*db
)
1549 dmu_buf_impl_t
*parent
= db
->db_parent
;
1550 dmu_buf_impl_t
*dndb
;
1551 int dbuf_gone
= FALSE
;
1553 ASSERT(MUTEX_HELD(&db
->db_mtx
));
1554 ASSERT(refcount_is_zero(&db
->db_holds
));
1556 dbuf_evict_user(db
);
1558 if (db
->db_state
== DB_CACHED
) {
1559 ASSERT(db
->db
.db_data
!= NULL
);
1560 if (db
->db_blkid
== DMU_BONUS_BLKID
) {
1561 zio_buf_free(db
->db
.db_data
, DN_MAX_BONUSLEN
);
1562 arc_space_return(DN_MAX_BONUSLEN
, ARC_SPACE_OTHER
);
1564 db
->db
.db_data
= NULL
;
1565 db
->db_state
= DB_UNCACHED
;
1568 ASSERT(db
->db_state
== DB_UNCACHED
|| db
->db_state
== DB_NOFILL
);
1569 ASSERT(db
->db_data_pending
== NULL
);
1571 db
->db_state
= DB_EVICTING
;
1572 db
->db_blkptr
= NULL
;
1577 if (db
->db_blkid
!= DMU_BONUS_BLKID
&& MUTEX_HELD(&dn
->dn_dbufs_mtx
)) {
1578 list_remove(&dn
->dn_dbufs
, db
);
1579 (void) atomic_dec_32_nv(&dn
->dn_dbufs_count
);
1583 * Decrementing the dbuf count means that the hold corresponding
1584 * to the removed dbuf is no longer discounted in dnode_move(),
1585 * so the dnode cannot be moved until after we release the hold.
1586 * The membar_producer() ensures visibility of the decremented
1587 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1591 db
->db_dnode_handle
= NULL
;
1597 dbuf_gone
= arc_buf_evict(db
->db_buf
);
1600 mutex_exit(&db
->db_mtx
);
1603 * If this dbuf is referenced from an indirect dbuf,
1604 * decrement the ref count on the indirect dbuf.
1606 if (parent
&& parent
!= dndb
)
1607 dbuf_rele(parent
, db
);
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (ENOENT);
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err;
		if (dh == NULL) {
			err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
			    fail_sparse, NULL, parentp);
		} else {
			__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
			    blkid >> epbs, fail_sparse, NULL,
			    parentp, dh->dh_depth + 1);
			err = __dbuf_hold_impl(dh + 1);
		}
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}
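/*
 * Example of the epbs arithmetic above (illustrative): with 128K
 * indirect blocks, epbs == 17 - SPA_BLKPTRSHIFT(7) == 10, so each
 * indirect block holds 1024 block pointers, and the level+1 parent of
 * block id b is found at b >> 10, slot (b & 1023) within that block.
 */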
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	list_insert_head(&dn->dn_dbufs, db);
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}
static int
dbuf_do_evict(void *private)
{
	arc_buf_t *buf = private;
	dmu_buf_impl_t *db = buf->b_private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}
static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			list_remove(&dn->dn_dbufs, db);
			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(!list_link_active(&db->db_link));
	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}
void
dbuf_prefetch(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db = NULL;
	blkptr_t *bp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	if (dnode_block_freed(dn, blkid))
		return;

	/* dbuf_find() returns with db_mtx held */
	if ((db = dbuf_find(dn, 0, blkid))) {
		/*
		 * This dbuf is already in the cache.  We assume that
		 * it is already CACHED, or else about to be either
		 * read or filled.
		 */
		mutex_exit(&db->db_mtx);
		return;
	}

	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
		if (bp && !BP_IS_HOLE(bp)) {
			int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
			    ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
			arc_buf_t *pbuf;
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
			    dn->dn_object, 0, blkid);

			if (db)
				pbuf = db->db_buf;
			else
				pbuf = dn->dn_objset->os_phys_buf;

			(void) dsl_read(NULL, dn->dn_objset->os_spa,
			    bp, pbuf, NULL, NULL, priority,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		if (db)
			dbuf_rele(db, NULL);
	}
}
#define	DBUF_HOLD_IMPL_MAX_DEPTH	20

/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
static int
__dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
{
	ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
	dh->dh_parent = NULL;

	ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
	ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);

	*(dh->dh_dbp) = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);

	if (dh->dh_db == NULL) {
		dh->dh_bp = NULL;

		ASSERT3P(dh->dh_parent, ==, NULL);
		dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_fail_sparse, &dh->dh_parent,
		    &dh->dh_bp, dh);
		if (dh->dh_fail_sparse) {
			if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
				dh->dh_err = ENOENT;
			if (dh->dh_err) {
				if (dh->dh_parent)
					dbuf_rele(dh->dh_parent, NULL);
				return (dh->dh_err);
			}
		}
		if (dh->dh_err && dh->dh_err != ENOENT)
			return (dh->dh_err);
		dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_parent, dh->dh_bp);
	}

	if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
		arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
		if (dh->dh_db->db_buf->b_data == NULL) {
			dbuf_clear(dh->dh_db);
			if (dh->dh_parent) {
				dbuf_rele(dh->dh_parent, NULL);
				dh->dh_parent = NULL;
			}
			goto top;
		}
		ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
	}

	ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (dh->dh_db->db_level == 0 &&
	    dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
	    dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
	    dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
		dh->dh_dr = dh->dh_db->db_data_pending;

		if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
			dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);

			dbuf_set_data(dh->dh_db,
			    arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
			    dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
			bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
			    dh->dh_db->db.db_data, dh->dh_db->db.db_size);
		}
	}

	(void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
	dbuf_update_data(dh->dh_db);
	DBUF_VERIFY(dh->dh_db);
	mutex_exit(&dh->dh_db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (dh->dh_parent)
		dbuf_rele(dh->dh_parent, NULL);

	ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
	ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
	ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
	*(dh->dh_dbp) = dh->dh_db;

	return (0);
}
/*
 * The following code preserves the recursive function dbuf_hold_impl()
 * but moves the local variables AND function arguments to the heap to
 * minimize the stack frame size.  Enough space is initially allocated
 * for 20 levels of recursion.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp)
{
	struct dbuf_hold_impl_data *dh;
	int error;

	dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
	__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

	error = __dbuf_hold_impl(dh);

	kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH);

	return (error);
}
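/*
 * Typical usage (illustrative sketch): callers pair dbuf_hold_impl()
 * with dbuf_rele() while holding the dnode's struct_rwlock, e.g.:
 *
 *	dmu_buf_impl_t *db;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	if (dbuf_hold_impl(dn, 0, blkid, FALSE, FTAG, &db) == 0) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 *	rw_exit(&dn->dn_struct_rwlock);
 */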
static void
__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth)
{
	dh->dh_dn = dn;
	dh->dh_level = level;
	dh->dh_blkid = blkid;
	dh->dh_fail_sparse = fail_sparse;
	dh->dh_tag = tag;
	dh->dh_dbp = dbp;
	dh->dh_depth = depth;
}
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (ENOTSUP);
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	if (blksz > SPA_MAXBLOCKSIZE)
		blksz = SPA_MAXBLOCKSIZE;
	else
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	VERIFY(refcount_add(&db->db_holds, tag) > 1);
}
/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode. (An indirect
 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
 * dnode's parent dbuf evicting its dnode handles.
 */
#pragma weak dmu_buf_rele = dbuf_rele
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}
/*
 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			mutex_exit(&db->db_mtx);

			/*
			 * If the dnode moves here, we cannot cross this barrier
			 * until the move completes.
			 */
			DB_DNODE_ENTER(db);
			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
			DB_DNODE_EXIT(db);
			/*
			 * The bonus buffer's dnode hold is no longer discounted
			 * in dnode_move().  The dnode cannot move until after
			 * the dnode_rele().
			 */
			dnode_rele(DB_DNODE(db), db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db) == 1);
			dbuf_evict(db);
		} else {
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
			if (!DBUF_IS_CACHEABLE(db))
				dbuf_clear(db);
			else
				mutex_exit(&db->db_mtx);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}

#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_level == 0);
	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;
		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}
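
/*
 * Usage sketch (illustrative; my_state, ms_data, and my_evict_cb are
 * hypothetical names): dmu_buf_update_user() behaves like a compare-and-
 * swap on the dbuf's user pointer, so a caller can atomically claim an
 * unowned dbuf:
 *
 *	if (dmu_buf_set_user(db_fake, my_state, &my_state->ms_data,
 *	    my_evict_cb) != NULL) {
 *		... another user's state was already attached; back off ...
 *	}
 *
 * A NULL return means the slot was empty and my_state is now attached;
 * my_evict_cb will be invoked when the dbuf is evicted.
 */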

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(!refcount_is_zero(&db->db_holds));
	return (db->db_user_ptr);
}

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no blkptrs available in the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
	}
}
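
/*
 * Worked example of the parent/child arithmetic above (a sketch with
 * typical values): with dn_indblkshift == 14 (16K indirect blocks) and
 * SPA_BLKPTRSHIFT == 7 (128-byte block pointers),
 *
 *	epbs = 14 - 7 = 7	(128 block pointers per indirect block)
 *
 * so level-0 block 300 lives under level-1 block 300 >> 7 == 2, in slot
 * 300 & ((1ULL << 7) - 1) == 44 of that indirect block's blkptr array.
 */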

static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}

static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT3U(db->db_level, ==, 0);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		if (dr->dr_dbuf->db_level != 0) {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns.  We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}

static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (BP_IS_HOLE(bp)) {
		ASSERT(bp->blk_fill == 0);
		DB_DNODE_EXIT(db);
		return;
	}

	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_type) ||
	    (db->db_blkid == DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_bonustype));
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0;
		    i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}
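
/*
 * Worked example of the fill count above (a sketch with typical values):
 * a 16K level-0 block of the meta-dnode holds 16384 >> DNODE_SHIFT
 * (i.e. 16384 >> 9 == 32) dnodes, and fill counts those whose dn_type is
 * not DMU_OT_NONE.  For an indirect block, fill is instead the sum of the
 * children's blk_fill, so a hole anywhere below contributes nothing and
 * blk_fill reports how many data blocks a subtree actually references.
 */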

static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT3U(zio->io_error, ==, 0);
	ASSERT(db->db_blkptr == bp);

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		objset_t *os;
		dsl_dataset_t *ds;
		dmu_tx_t *tx;

		DB_GET_OBJSET(&os, db);
		ds = os->os_dsl_dataset;
		tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT);
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr))
				arc_buf_thaw(data);
			else
				dbuf_release_bp(db);
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block; its write issues ours. */
		ASSERT(parent && parent->db_data_pending);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, dbuf_write_override_done, dr,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
		    dbuf_write_ready, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}