/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfeature.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
/*
 * Enable/disable nopwrite feature.
 */
static int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied L1 blocks from frees allowed into
 * one TXG. After this threshold is crossed, additional dirty blocks from frees
 * will wait until the next TXG.
 * A value of zero will disable this throttle.
 */
static unsigned long zfs_per_txg_dirty_frees_percent = 5;

/*
 * Enable/disable forcing txg sync when dirty checking for holes with lseek().
 * By default this is enabled to ensure accurate hole reporting, but it can
 * result in a significant performance penalty for lseek(SEEK_HOLE)-heavy
 * workloads. Disabling this option will result in holes never being reported
 * in dirty files, which is always safe.
 */
static int zfs_dmu_offset_next_sync = 1;

/*
 * Limit the amount we can prefetch with one call to this amount. This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
static int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory"},
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array"},
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log"},
	{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode"},
	{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset"},
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props"},
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset"},
	{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode"},
	{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL"},
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue"},
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop"},
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]"},
	{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log"},
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions"},
	{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL"},
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL"},
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics"},
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations"},
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map"},
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones"},
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj"}
};
const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array,		"uint8"	},
	{ byteswap_uint16_array,	"uint16" },
	{ byteswap_uint32_array,	"uint32" },
	{ byteswap_uint64_array,	"uint64" },
	{ zap_byteswap,			"zap"	},
	{ dnode_buf_byteswap,		"dnode"	},
	{ dmu_objset_byteswap,		"objset" },
	{ zfs_znode_byteswap,		"znode"	},
	{ zfs_oldacl_byteswap,		"oldacl" },
	{ zfs_acl_byteswap,		"acl"	}
};
int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_bonus_max(void)
{
	return (DN_OLD_MAX_BONUSLEN);
}
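
/*
 * Usage sketch (hypothetical caller, not part of upstream code): read-only
 * consumers typically pair dmu_buf_hold() with dmu_buf_rele(), using a stable
 * tag such as FTAG so the hold can be tracked:
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		// db->db_data points at db->db_size bytes covering "offset"
 *		dmu_buf_rele(db, FTAG);
 *	}
 */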
int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}
int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}
dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	return (error);
}
/*
 * Lookup and hold the bonus buffer for the provided dnode.  If the dnode
 * has not yet been allocated a bonus dbuf, a new one will be allocated.
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
    uint32_t flags)
{
	dmu_buf_impl_t *db;
	int error;
	uint32_t db_flags = DB_RF_MUST_SUCCEED;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	error = dbuf_read(db, NULL, db_flags);
	if (error) {
		dnode_evict_bonus(dn);
		dbuf_rele(db, tag);
		*dbp = NULL;
		return (error);
	}

	*dbp = &db->db;
	return (0);
}
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
	dnode_rele(dn, FTAG);

	return (error);
}
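
/*
 * Usage sketch (hypothetical caller, not part of upstream code): the bonus
 * buffer is how small per-object metadata (e.g. znode or SA data) is reached:
 *
 *	dmu_buf_t *bonus;
 *	if (dmu_bonus_hold(os, object, FTAG, &bonus) == 0) {
 *		// bonus->db_data is the object's bonus area of
 *		// bonus->db_size bytes
 *		dmu_buf_rele(bonus, FTAG);
 *	}
 */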
/*
 * Returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * If you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else {
		dbuf_rele(db, tag);
		*dbp = NULL;
	}
	return (err);
}
int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;
	uint32_t db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}
/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dmu_buf_t **dbp;
	zstream_t *zs = NULL;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio = NULL;
	boolean_t missed = B_FALSE;

	ASSERT(length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (read)
		zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
		/*
		 * Prepare the zfetch before initiating the demand reads, so
		 * that if multiple threads block on same indirect block, we
		 * base predictions on the original less racy request order.
		 */
		zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
		    read && DNODE_IS_CACHEABLE(dn), B_TRUE);
	}
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			if (zs)
				dmu_zfetch_run(zs, missed, B_TRUE);
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			if (read)
				zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/*
		 * Initiate async demand data read.
		 * We check the db_state after calling dbuf_read() because
		 * (1) dbuf_read() may change the state to CACHED due to a
		 * hit in the ARC, and (2) on a cache miss, a child will
		 * have been added to "zio" but not yet completed, so the
		 * state will not yet be CACHED.
		 */
		if (read) {
			(void) dbuf_read(db, zio, dbuf_flags);
			if (db->db_state != DB_CACHED)
				missed = B_TRUE;
		}
		dbp[i] = &db->db;
	}

	if (!read)
		zfs_racct_write(length, nblks);

	if (zs)
		dmu_zfetch_run(zs, missed, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);

	if (read) {
		/* wait for async read i/o */
		err = zio_wait(zio);
		if (err) {
			dmu_buf_rele_array(dbp, nblks, tag);
			return (err);
		}

		/* wait for other io to complete */
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}
static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) { /* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * See comment before the definition of dmu_prefetch_max.
	 */
	len = MIN(len, dmu_prefetch_max);

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and offset
	 * is the first.  Then dbuf_whichblk(dn, level, off + len - 1) is the
	 * last block we want to prefetch, and dbuf_whichblock(dn, level,
	 * offset) is the first.  Then the number we need to prefetch is the
	 * last - first + 1.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}
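
/*
 * Usage sketch (hypothetical caller, not part of upstream code): prefetch is
 * fire-and-forget, so callers simply issue it ahead of an expected read, e.g.
 * to warm the next 1 MiB of level-0 data blocks of an object:
 *
 *	dmu_prefetch(os, object, 0, next_offset, 1024 * 1024,
 *	    ZIO_PRIORITY_ASYNC_READ);
 */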
/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed and l1blks is set to the number of level 1
 * indirect blocks found within the chunk.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
	uint64_t blks;
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
	    EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	/*
	 * Check if we can free the entire range assuming that all of the
	 * L1 blocks in this range have data. If we can, we use this
	 * worst case value as an estimate so we can avoid having to look
	 * at the object's actual data.
	 */
	uint64_t total_l1blks =
	    (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
	    iblkrange;
	if (total_l1blks <= maxblks) {
		*l1blks = total_l1blks;
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;

		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			*l1blks = blks;
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	*l1blks = blks;

	return (0);
}
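
/*
 * Worked example for the iblkrange computation above (values assume the
 * common defaults, not a requirement): with a 128 KiB data block size and a
 * 128 KiB indirect block size (dn_indblkshift = 17), each level-1 indirect
 * holds EPB(17, SPA_BLKPTRSHIFT) = 1 << (17 - 7) = 1024 block pointers, so
 * iblkrange = 131072 * 1024 = 128 MiB of file data per L1 indirect block.
 */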
/*
 * If this objset is of type OST_ZFS return true if vfs's unmounted flag is
 * set, otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#endif
	return (B_FALSE);
}
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (dn == NULL)
		return (SET_ERROR(EINVAL));

	object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 20;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t l1blks;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		uint64_t txg = dmu_tx_get_txg(tx);

		mutex_enter(&dp->dp_lock);
		uint64_t long_free_dirty =
		    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty >= dirty_frees_threshold) {
			DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
			dmu_tx_commit(tx);
			txg_wait_open(dp, 0, B_TRUE);
			continue;
		}

		/*
		 * In order to prevent unnecessary write throttling, for each
		 * TXG, we track the cumulative size of L1 blocks being dirtied
		 * in dnode_free_range() below. We compare this number to a
		 * tunable threshold, past which we prevent new L1 dirty freeing
		 * blocks from being added into the open TXG. See
		 * dmu_free_long_range_impl() for details. The threshold
		 * prevents write throttle activation due to dirty freeing L1
		 * blocks taking up a large percentage of zfs_dirty_data_max.
		 */
		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
		    l1blks << dn->dn_indblkshift;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty, uint64_t, chunk_len,
		    uint64_t, txg);
		dnode_free_range(dn, chunk_begin, chunk_len, tx);

		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}
int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}
int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}
static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		uint64_t newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			uint64_t tocpy;
			int64_t bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = MIN(db->db_size - bufoff, size);

			(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}
static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
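
/*
 * Usage sketch (hypothetical caller, not part of upstream code): like all
 * DMU writes, dmu_write() must run inside an assigned transaction covering
 * the range being modified:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		dmu_write(os, object, offset, size, buf, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */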
/*
 * Note: Lustre is an external consumer of this interface.
 */
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}
void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	int numbufs, i;
	dmu_buf_t **dbp;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
	    &numbufs, &dbp));
	for (i = 0; i < numbufs; i++)
		dmu_buf_redact(dbp[i], tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    TRUE, FTAG, &numbufs, &dbp, 0);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = zfs_uio_offset(uio) - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
		    UIO_READ, uio);
		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}
/*
 * Read 'size' bytes into the uio buffer.
 * From object zdb->db_object.
 * Starting at zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_read_uio_dnode(dn, uio, size);
	DB_DNODE_EXIT(db);

	return (err);
}
/*
 * Read 'size' bytes into the uio buffer.
 * From the specified object.
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_read_uio_dnode(dn, uio, size);

	dnode_rele(dn, FTAG);

	return (err);
}
int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = zfs_uio_offset(uio) - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX zfs_uiomove could block forever (eg.nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that zfs_uiomove won't
		 * block.
		 */
		err = zfs_uio_fault_move((char *)db->db_data + bufoff,
		    tocpy, UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}
/*
 * Write 'size' bytes from the uio buffer.
 * To object zdb->db_object.
 * Starting at offset zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_write_uio_dnode(dn, uio, size, tx);
	DB_DNODE_EXIT(db);

	return (err);
}
/*
 * Write 'size' bytes from the uio buffer.
 * To the specified object.
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}
#endif /* _KERNEL */
/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	arc_buf_destroy(buf, FTAG);
}
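
/*
 * Usage sketch (hypothetical caller, not part of upstream code): a loaned
 * arc buffer is requested, filled by the caller, and then either handed to
 * the DMU with dmu_assign_arcbuf_by_dbuf() (which consumes it) or given back
 * with dmu_return_arcbuf() on error paths:
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db_handle, blksz);
 *	// ... copy blksz bytes of new data into abuf->b_data ...
 *	if (dmu_assign_arcbuf_by_dbuf(db_handle, offset, abuf, tx) != 0)
 *		dmu_return_arcbuf(abuf);
 */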
/*
 * A "lightweight" write is faster than a regular write (e.g.
 * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
 * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]t.  However, the
 * data can not be read or overwritten until the transaction's txg has been
 * synced.  This makes it appropriate for workloads that are known to be
 * (temporarily) write-only, like "zfs receive".
 *
 * A single block is written, starting at the specified offset in bytes.  If
 * the call is successful, it returns 0 and the provided abd has been
 * consumed (the caller should not free it).
 */
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
    const zio_prop_t *zp, enum zio_flag flags, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr =
	    dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
	if (dr == NULL)
		return (SET_ERROR(EIO));
	dr->dt.dll.dr_abd = abd;
	dr->dt.dll.dr_props = *zp;
	dr->dt.dll.dr_flags = flags;
	return (0);
}
/*
 * When possible directly assign passed loaned arc buffer to a dbuf.
 * If this is not possible copy the contents of passed arc buf via
 * dmu_write().
 */
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	objset_t *os = dn->dn_objset;
	uint64_t object = dn->dn_object;
	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
	uint64_t blkid;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, FTAG);
	if (db == NULL)
		return (SET_ERROR(EIO));
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * We can only assign if the offset is aligned and the arc buf is the
	 * same size as the dbuf.
	 */
	if (offset == db->db.db_offset && blksz == db->db.db_size) {
		zfs_racct_write(blksz, 1);
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		/* compressed bufs must always be assignable to their dbuf */
		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));

		dbuf_rele(db, FTAG);
		dmu_write(os, object, offset, blksz, buf->b_data, tx);
		dmu_return_arcbuf(buf);
	}

	return (0);
}
*handle
, uint64_t offset
, arc_buf_t
*buf
,
1492 dmu_buf_impl_t
*dbuf
= (dmu_buf_impl_t
*)handle
;
1494 DB_DNODE_ENTER(dbuf
);
1495 err
= dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf
), offset
, buf
, tx
);
1496 DB_DNODE_EXIT(dbuf
);
typedef struct {
	dbuf_dirty_record_t	*dsa_dr;
	dmu_sync_cb_t		*dsa_done;
	zgd_t			*dsa_zgd;
	dmu_tx_t		*dsa_tx;
} dmu_sync_arg_t;
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(bp)) {
			/*
			 * A block of zeros may compress to a hole, but the
			 * block size still needs to be known for replay.
			 */
			BP_SET_LSIZE(bp, db->db_size);
		} else if (!BP_IS_EMBEDDED(bp)) {
			ASSERT(BP_GET_LEVEL(bp) == 0);
			BP_SET_FILL(bp, 1);
		}
	}
}
static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	zgd_t *zgd = dsa->dsa_zgd;

	/*
	 * Record the vdev(s) backing this blkptr so they can be flushed after
	 * the writes for the lwb have completed.
	 */
	if (zio->io_error == 0) {
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
	}

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	if (zio->io_error == 0) {
		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
		if (dr->dt.dl.dr_nopwrite) {
			blkptr_t *bp = zio->io_bp;
			blkptr_t *bp_orig = &zio->io_bp_orig;
			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);

			ASSERT(BP_EQUAL(bp, bp_orig));
			VERIFY(BP_EQUAL(bp, db->db_blkptr));
			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
			VERIFY(zio_checksum_table[chksum].ci_flags &
			    ZCHECKSUM_FLAG_NOPWRITE);
		}
		dr->dt.dl.dr_overridden_by = *zio->io_bp;
		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;

		/*
		 * Old style holes are filled with all zeros, whereas
		 * new-style holes maintain their lsize, type, level,
		 * and birth time (see zio_write_compress). While we
		 * need to reset the BP_SET_LSIZE() call that happened
		 * in dmu_sync_ready for old style holes, we do *not*
		 * want to wipe out the information contained in new
		 * style holes. Thus, only zero out the block pointer if
		 * it's an old style hole.
		 */
		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
		    dr->dt.dl.dr_overridden_by.blk_birth == 0)
			BP_ZERO(&dr->dt.dl.dr_overridden_by);
	} else {
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}
static void
dmu_sync_late_arrival_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dmu_sync_arg_t *dsa = zio->io_private;
	zgd_t *zgd = dsa->dsa_zgd;

	if (zio->io_error == 0) {
		/*
		 * Record the vdev(s) backing this blkptr so they can be
		 * flushed after the writes for the lwb have completed.
		 */
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);

		if (!BP_IS_HOLE(bp)) {
			blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
			ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
			ASSERT(zio->io_bp->blk_birth == zio->io_txg);
			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
		}
	}

	dmu_tx_commit(dsa->dsa_tx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	abd_free(zio->io_abd);
	kmem_free(dsa, sizeof (*dsa));
}
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_phys_t *zb)
{
	dmu_sync_arg_t *dsa;
	dmu_tx_t *tx;

	tx = dmu_tx_create(os);
	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
		dmu_tx_abort(tx);
		/* Make zl_get_data do txg_wait_synced() */
		return (SET_ERROR(EIO));
	}

	/*
	 * In order to prevent the zgd's lwb from being free'd prior to
	 * dmu_sync_late_arrival_done() being called, we have to ensure
	 * the lwb's "max txg" takes this tx's txg into account.
	 */
	zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = NULL;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = tx;

	/*
	 * Since we are currently syncing this txg, it's nontrivial to
	 * determine what BP to nopwrite against, so we disable nopwrite.
	 *
	 * When syncing, the db_blkptr is initially the BP of the previous
	 * txg.  We can not nopwrite against it because it will be changed
	 * (this is similar to the non-late-arrival case where the dbuf is
	 * dirty in a future txg).
	 *
	 * Then dbuf_write_ready() sets bp_blkptr to the location we will write.
	 * We can not nopwrite against it because although the BP will not
	 * (typically) be changed, the data has not yet been persisted to this
	 * location.
	 *
	 * Finally, when dbuf_write_done() is called, it is theoretically
	 * possible to always nopwrite, because the data that was written in
	 * this txg is the same data that we are trying to write.  However we
	 * would need to check that this dbuf is not dirty in any future
	 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
	 * don't nopwrite in this case.
	 */
	zp->zp_nopwrite = B_FALSE;

	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
	    dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));

	return (0);
}
/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 *	EEXIST: this txg has already been synced, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *		The caller should not log the write.
 *
 *	EALREADY: this block is already in the process of being synced.
 *		The caller should track its progress (somehow).
 *
 *	EIO: could not do the I/O.
 *		The caller should do a txg_wait_synced().
 *
 *	0: the I/O has been initiated.
 *		The caller should log this blkptr in the done callback.
 *		It is possible that the I/O will fail, in which case
 *		the error will be reported to the done callback and
 *		propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr, *dr_next;
	dmu_sync_arg_t *dsa;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	dnode_t *dn;

	ASSERT(pio != NULL);
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us.  If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced.  There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EEXIST));
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = dbuf_find_dirty_eq(db, txg);

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	dr_next = list_next(&db->db_dirty_records, dr);
	ASSERT(dr_next == NULL || dr_next->dr_txg < txg);

	if (db->db_blkptr != NULL) {
		/*
		 * We need to fill in zgd_bp with the current blkptr so that
		 * the nopwrite code can check if we're writing the same
		 * data that's already on disk.  We can only nopwrite if we
		 * are sure that after making the copy, db_blkptr will not
		 * change until our i/o completes.  We ensure this by
		 * holding the db_mtx, and only allowing nopwrite if the
		 * block is not already dirty (see below).  This is verified
		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
		 * not changed.
		 */
		*zgd->zgd_bp = *db->db_blkptr;
	}

	/*
	 * Assume the on-disk data is X, the current syncing data (in
	 * txg - 1) is Y, and the current in-memory data is Z (currently
	 * in dmu_sync).
	 *
	 * We usually want to perform a nopwrite if X and Z are the
	 * same.  However, if Y is different (i.e. the BP is going to
	 * change before this write takes effect), then a nopwrite will
	 * be incorrect - we would override with X, which could have
	 * been freed when Y was written.
	 *
	 * (Note that this is not a concern when we are nop-writing from
	 * syncing context, because X and Y must be identical, because
	 * all previous txgs have been synced.)
	 *
	 * Therefore, we disable nopwrite if the current BP could change
	 * before this TXG.  There are two ways it could change: by
	 * being dirty (dr_next is non-NULL), or by being freed
	 * (dnode_block_freed()).  This behavior is verified by
	 * zio_done(), which VERIFYs that the override BP is identical
	 * to the on-disk BP.
	 */
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
		zp.zp_nopwrite = B_FALSE;
	DB_DNODE_EXIT(db);

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * We have already issued a sync write for this buffer,
		 * or this buffer has already been synced.  It could not
		 * have been dirtied since, or we would have cleared the state.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EALREADY));
	}

	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	mutex_exit(&db->db_mtx);

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = dr;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = NULL;

	zio_nowait(arc_write(pio, os->os_spa, txg,
	    zgd->zgd_bp, dr->dt.dl.dr_data, dbuf_is_l2cacheable(db),
	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));

	return (0);
}
int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_nlevels(dn, nlevels, tx);
	dnode_rele(dn, FTAG);
	return (err);
}
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}
int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (0);
}
void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's checksum function.  This
	 * check ensures that the receiving system can understand the
	 * checksum function transmitted.
	 */
	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}
void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's compression function.  This
	 * check ensures that the receiving system can understand the
	 * compression function transmitted.
	 */
	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}
/*
 * When the "redundant_metadata" property is set to "most", only indirect
 * blocks of this level and higher will have an additional ditto block.
 */
static const int zfs_redundant_metadata_most_ditto_level = 2;
void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	uint8_t complevel = os->os_complevel;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup = B_FALSE;
	boolean_t nopwrite = B_FALSE;
	boolean_t dedup_verify = os->os_dedup_verify;
	boolean_t encrypt = B_FALSE;
	int copies = os->os_copies;

	/*
	 * We maintain different write policies for each of the following
	 * types of data:
	 *	 1. metadata
	 *	 2. preallocated blocks (i.e. level-0 blocks of a dump device)
	 *	 3. all other level 0 blocks
	 */
	if (ismd) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		compress = zio_compress_select(os->os_spa,
		    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);

		/*
		 * Metadata always gets checksummed.  If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (!(zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_METADATA) ||
		    (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_EMBEDDED))
			checksum = ZIO_CHECKSUM_FLETCHER_4;

		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
		    (os->os_redundant_metadata ==
		    ZFS_REDUNDANT_METADATA_MOST &&
		    (level >= zfs_redundant_metadata_most_ditto_level ||
		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
			copies++;
	} else if (wp & WP_NOFILL) {
		ASSERT(level == 0);

		/*
		 * If we're writing preallocated blocks, we aren't actually
		 * writing them so don't set any policy properties.  These
		 * blocks are currently only used by an external subsystem
		 * outside of zfs (i.e. dump) and not written by the zio
		 * pipeline.
		 */
		compress = ZIO_COMPRESS_OFF;
		checksum = ZIO_CHECKSUM_OFF;
	} else {
		compress = zio_compress_select(os->os_spa, dn->dn_compress,
		    compress);
		complevel = zio_complevel_select(os->os_spa, compress,
		    complevel, complevel);

		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
		    zio_checksum_select(dn->dn_checksum, checksum) :
		    dedup_checksum;

		/*
		 * Determine dedup setting.  If we are in dmu_sync(),
		 * we won't actually dedup now because that's all
		 * done in syncing context; but we do want to use the
		 * dedup checksum.  If the checksum is not strong
		 * enough to ensure unique signatures, force
		 * dedup_verify.
		 */
		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
			if (!(zio_checksum_table[checksum].ci_flags &
			    ZCHECKSUM_FLAG_DEDUP))
				dedup_verify = B_TRUE;
		}

		/*
		 * Enable nopwrite if we have secure enough checksum
		 * algorithm (see comment in zio_nop_write) and
		 * compression is enabled.  We don't enable nopwrite if
		 * dedup is enabled as the two features are mutually
		 * exclusive.
		 */
		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE) &&
		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
	}

	/*
	 * All objects in an encrypted objset are protected from modification
	 * via a MAC. Encrypted objects store their IV and salt in the last DVA
	 * in the bp, so we cannot use all copies. Encrypted objects are also
	 * not subject to nopwrite since writing the same data will still
	 * result in a new ciphertext. Only encrypted blocks can be dedup'd
	 * to avoid ambiguity in the dedup code since the DDT does not store
	 * object types.
	 */
	if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
		encrypt = B_TRUE;

		if (DMU_OT_IS_ENCRYPTED(type)) {
			copies = MIN(copies, SPA_DVAS_PER_BP - 1);
			nopwrite = B_FALSE;
		} else {
			dedup = B_FALSE;
		}

		if (level <= 0 &&
		    (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
			compress = ZIO_COMPRESS_EMPTY;
		}
	}

	zp->zp_compress = compress;
	zp->zp_complevel = complevel;
	zp->zp_checksum = checksum;
	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
	zp->zp_nopwrite = nopwrite;
	zp->zp_encrypt = encrypt;
	zp->zp_byteorder = ZFS_HOST_BYTEORDER;
	bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
	bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
	bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
	zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
	    os->os_zpl_special_smallblock : 0;

	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
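
/*
 * Usage sketch (not part of upstream code): callers that construct their own
 * zios (e.g. dmu_sync() above) obtain the effective block-level policy for a
 * held dnode this way:
 *
 *	zio_prop_t zp;
 *	dmu_write_policy(os, dn, 0, 0, &zp);
 *	// zp.zp_checksum / zp.zp_compress / zp.zp_copies now reflect the
 *	// dataset properties and the tunables above.
 */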
/*
 * This function is only called from zfs_holey_common() for zpl_llseek()
 * in order to determine the location of holes.  In order to accurately
 * report holes all dirty data must be synced to disk.  This causes extremely
 * poor performance when seeking for holes in a dirty file.  As a compromise,
 * only provide hole data when the dnode is clean.  When a dnode is dirty
 * report the dnode as having no holes, which is always a safe thing to do.
 */
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dnode_is_dirty(dn)) {
		/*
		 * If the zfs_dmu_offset_next_sync module option is enabled
		 * then strict hole reporting has been requested.  Dirty
		 * dnodes must be synced to disk to accurately report all
		 * holes.  When disabled dirty dnodes are reported to not
		 * have any holes, which is always safe.
		 *
		 * When called by zfs_holey_common() the zp->z_rangelock
		 * is held to prevent zfs_write() and mmap writeback from
		 * re-dirtying the dnode after txg_wait_synced().
		 */
		if (zfs_dmu_offset_next_sync) {
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);
			txg_wait_synced(dmu_objset_pool(os), 0);
			return (SET_ERROR(EAGAIN));
		}

		err = SET_ERROR(EBUSY);
	} else {
		err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	return (err);
}
void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_nblkptr = dn->dn_nblkptr;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	__dmu_object_info_from_dnode(dn, doi);

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}
/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}
/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}
/*
 * Faster still when you only care about the size.
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add in number of slots used for the dnode itself */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;

	DB_DNODE_EXIT(db);
}
void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
	DB_DNODE_EXIT(db);
}
void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

void
byteswap_uint8_array(void *vbuf, size_t size)
{
	(void) vbuf, (void) size;
}
	arc_fini(); /* arc depends on l2arc, so arc must go first */
EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
	"Enable NOP writes");

ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, ULONG, ZMOD_RW,
	"Percentage of dirtied blocks from frees in one TXG");

ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
	"Enable forcing txg sync to find holes");

ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, INT, ZMOD_RW,
	"Limit one prefetch call to this size");