/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
static int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied L1 blocks from frees allowed into
 * one TXG. After this threshold is crossed, additional dirty blocks from frees
 * will wait until the next TXG.
 * A value of zero will disable this throttle.
 */
static uint_t zfs_per_txg_dirty_frees_percent = 30;

/*
 * Enable/disable forcing txg sync when dirty checking for holes with lseek().
 * By default this is enabled to ensure accurate hole reporting; however, it
 * can result in a significant performance penalty for lseek(SEEK_HOLE) heavy
 * workloads. Disabling this option will result in holes never being reported
 * in dirty files, which is always safe.
 */
static int zfs_dmu_offset_next_sync = 1;

/*
 * Limit the amount we can prefetch with one call to this amount. This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
	{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
	{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
	{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
	{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
	{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
	{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};

dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array, "uint8" },
	{ byteswap_uint16_array, "uint16" },
	{ byteswap_uint32_array, "uint32" },
	{ byteswap_uint64_array, "uint64" },
	{ zap_byteswap, "zap" },
	{ dnode_buf_byteswap, "dnode" },
	{ dmu_objset_byteswap, "objset" },
	{ zfs_znode_byteswap, "znode" },
	{ zfs_oldacl_byteswap, "oldacl" },
	{ zfs_acl_byteswap, "acl" }
};

static int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}
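
/*
 * Example (illustrative sketch, not part of the DMU itself; "os", "obj"
 * and "off" are placeholders): a minimal consumer of the hold interface
 * above. A successful hold must always be paired with a release on the
 * same tag.
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, obj, off, FTAG, &db, DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		... read up to db->db_size bytes at db->db_data ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */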

int
dmu_bonus_max(void)
{
	return (DN_OLD_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * Lookup and hold the bonus buffer for the provided dnode. If the dnode
 * has not yet been allocated a bonus dbuf, one will be allocated.
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
    uint32_t flags)
{
	dmu_buf_impl_t *db;
	int error;
	uint32_t db_flags = DB_RF_MUST_SUCCEED;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	error = dbuf_read(db, NULL, db_flags);
	if (error) {
		dnode_evict_bonus(dn);
		dbuf_rele(db, tag);
		*dbp = NULL;
		return (error);
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
	dnode_rele(dn, FTAG);

	return (error);
}
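
/*
 * Example (illustrative sketch; "os" and "obj" are placeholders): holding
 * an object's bonus buffer to inspect its contents. The bonus dbuf pins
 * the dnode for as long as it is held, per the comment above.
 *
 *	dmu_buf_t *bdb;
 *	if (dmu_bonus_hold(os, obj, FTAG, &bdb) == 0) {
 *		... inspect up to bdb->db_size bytes at bdb->db_data ...
 *		dmu_buf_rele(bdb, FTAG);
 *	}
 */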

/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else {
		dbuf_rele(db, tag);
		*dbp = NULL;
	}
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;
	uint32_t db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
    uint32_t flags)
{
	dmu_buf_t **dbp;
	zstream_t *zs = NULL;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio = NULL;
	boolean_t missed = B_FALSE;

	ASSERT(!read || length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read. dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	if ((flags & DMU_READ_NO_DECRYPT) != 0)
		dbuf_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (read)
		zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    length <= zfetch_array_rd_sz) {
		/*
		 * Prepare the zfetch before initiating the demand reads, so
		 * that if multiple threads block on same indirect block, we
		 * base predictions on the original less racy request order.
		 */
		zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
		    B_TRUE);
	}
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			if (zs)
				dmu_zfetch_run(zs, missed, B_TRUE);
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			if (read)
				zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/*
		 * Initiate async demand data read.
		 * We check the db_state after calling dbuf_read() because
		 * (1) dbuf_read() may change the state to CACHED due to a
		 * hit in the ARC, and (2) on a cache miss, a child will
		 * have been added to "zio" but not yet completed, so the
		 * state will not yet be CACHED.
		 */
		if (read) {
			if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
			    offset + length < db->db.db_offset +
			    db->db.db_size) {
				if (offset <= db->db.db_offset)
					dbuf_flags |= DB_RF_PARTIAL_FIRST;
				else
					dbuf_flags |= DB_RF_PARTIAL_MORE;
			}
			(void) dbuf_read(db, zio, dbuf_flags);
			if (db->db_state != DB_CACHED)
				missed = B_TRUE;
		}
		dbp[i] = &db->db;
	}

	if (!read)
		zfs_racct_write(length, nblks);

	if (zs)
		dmu_zfetch_run(zs, missed, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);

	if (read) {
		/* wait for async read i/o */
		err = zio_wait(zio);
		if (err) {
			dmu_buf_rele_array(dbp, nblks, tag);
			return (err);
		}

		/* wait for other io to complete */
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
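
/*
 * Example (illustrative sketch; "os", "obj", "off" and "len" are
 * placeholders): the array variants hold every block overlapping
 * [off, off + len) in one call, and every successful hold must be
 * released through dmu_buf_rele_array() with the same tag.
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	if (dmu_buf_hold_array(os, obj, off, len, TRUE, FTAG,
 *	    &numbufs, &dbp) == 0) {
 *		for (int i = 0; i < numbufs; i++)
 *			... consume dbp[i]->db_data ...
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */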

/*
 * Issue prefetch i/os for the given blocks. If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) { /* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * See comment before the definition of dmu_prefetch_max.
	 */
	len = MIN(len, dmu_prefetch_max);

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call. The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and offset
	 * is the first. Then dbuf_whichblock(dn, level, off + len - 1) is the
	 * last block we want to prefetch, and dbuf_whichblock(dn, level,
	 * offset) is the first. Then the number we need to prefetch is the
	 * last - first + 1.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}
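
/*
 * Example (illustrative sketch; "os" and "obj" are placeholders): warm
 * the cache with the first 16MB of level-0 data before a sequential
 * scan. Requests larger than dmu_prefetch_max are clamped above, and
 * the prefetch i/os are issued asynchronously.
 *
 *	dmu_prefetch(os, obj, 0, 0, 16ULL << 20, ZIO_PRIORITY_ASYNC_READ);
 */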

/*
 * Get the next "chunk" of file data to free. We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state). We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length"). On return, *start will be the first
 * offset that should be freed and l1blks is set to the number of level 1
 * indirect blocks found within the chunk.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
	uint64_t blks;
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
	    EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	/*
	 * Check if we can free the entire range assuming that all of the
	 * L1 blocks in this range have data. If we can, we use this
	 * worst case value as an estimate so we can avoid having to look
	 * at the object's actual data.
	 */
	uint64_t total_l1blks =
	    (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
	    iblkrange;
	if (total_l1blks <= maxblks) {
		*l1blks = total_l1blks;
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset. We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;

		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			*l1blks = blks;
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	*l1blks = blks;

	return (0);
}
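
/*
 * Worked example for iblkrange above (illustrative values): with 128K
 * data blocks (dn_datablksz = 131072) and 128K indirect blocks
 * (dn_indblkshift = 17), an L1 indirect holds
 * EPB(17, SPA_BLKPTRSHIFT) = 2^(17 - 7) = 1024 block pointers, so
 * iblkrange = 131072 * 1024 = 128M of file data is covered per L1
 * block, and each loop iteration steps *start back by that amount.
 */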

/*
 * If this objset is of type OST_ZFS return true if vfs's unmounted flag is set,
 * otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#else
	(void) os;
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (dn == NULL)
		return (SET_ERROR(EINVAL));

	object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 20;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t l1blks;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		uint64_t txg = dmu_tx_get_txg(tx);

		mutex_enter(&dp->dp_lock);
		uint64_t long_free_dirty =
		    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty >= dirty_frees_threshold) {
			DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
			dmu_tx_commit(tx);
			txg_wait_open(dp, 0, B_TRUE);
			continue;
		}

		/*
		 * In order to prevent unnecessary write throttling, for each
		 * TXG, we track the cumulative size of L1 blocks being dirtied
		 * in dnode_free_range() below. We compare this number to a
		 * tunable threshold, past which we prevent new L1 dirty freeing
		 * blocks from being added into the open TXG. See
		 * dmu_free_long_range_impl() for details. The threshold
		 * prevents write throttle activation due to dirty freeing L1
		 * blocks taking up a large percentage of zfs_dirty_data_max.
		 */
		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
		    l1blks << dn->dn_indblkshift;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty, uint64_t, chunk_len,
		    uint64_t, txg);
		dnode_free_range(dn, chunk_begin, chunk_len, tx);

		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}
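
/*
 * Example (illustrative sketch; "os" and "obj" are placeholders):
 * destroying a large object without a caller-supplied tx. The helper
 * first frees all file data in chunked transactions (bounded by the
 * dirty-frees throttle above), then frees the object itself.
 *
 *	int err = dmu_free_long_object(os, obj);
 *	... err == 0 means the data and the object were freed ...
 */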

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block. If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		uint64_t newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		memset((char *)buf + newsz, 0, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			uint64_t tocpy;
			int64_t bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = MIN(db->db_size - bufoff, size);

			(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
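
/*
 * Example (illustrative sketch; "os", "obj", "off" and "buf" are
 * placeholders): dmu_write() requires an assigned transaction that
 * holds the written range.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, obj, off, sizeof (buf));
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		dmu_write(os, obj, off, sizeof (buf), buf, tx);
 *		dmu_tx_commit(tx);
 *	}
 */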

/*
 * Note: Lustre is an external consumer of this interface.
 */
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}

void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	int numbufs, i;
	dmu_buf_t **dbp;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
	    &numbufs, &dbp));
	for (i = 0; i < numbufs; i++)
		dmu_buf_redact(dbp[i], tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    TRUE, FTAG, &numbufs, &dbp, 0);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = zfs_uio_offset(uio) - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
		    UIO_READ, uio);

		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From object zdb->db_object.
 * Starting at zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_read_uio_dnode(dn, uio, size);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From the specified object
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_read_uio_dnode(dn, uio, size);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = zfs_uio_offset(uio) - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX zfs_uiomove could block forever (e.g. nfs-backed
		 * pages). There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that zfs_uiomove won't
		 * block.
		 */
		err = zfs_uio_fault_move((char *)db->db_data + bufoff,
		    tocpy, UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To object zdb->db_object.
 * Starting at offset zfs_uio_offset(uio).
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_write_uio_dnode(dn, uio, size, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To the specified object.
 * Starting at offset zfs_uio_offset(uio).
 */
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}
#endif /* _KERNEL */

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	arc_buf_destroy(buf, FTAG);
}

/*
 * A "lightweight" write is faster than a regular write (e.g.
 * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
 * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]t. However, the
 * data can not be read or overwritten until the transaction's txg has been
 * synced. This makes it appropriate for workloads that are known to be
 * (temporarily) write-only, like "zfs receive".
 *
 * A single block is written, starting at the specified offset in bytes. If
 * the call is successful, it returns 0 and the provided abd has been
 * consumed (the caller should not free it).
 */
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
    const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr =
	    dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
	if (dr == NULL)
		return (SET_ERROR(EIO));
	dr->dt.dll.dr_abd = abd;
	dr->dt.dll.dr_props = *zp;
	dr->dt.dll.dr_flags = flags;
	return (0);
}
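
/*
 * Example (illustrative sketch; "dn", "off", "data", "zp" and "tx" are
 * placeholders): a lightweight write hands one block's worth of data,
 * wrapped in an abd, straight to the dirty record. On success the abd
 * is consumed; the assumption here is that the caller frees it only on
 * failure.
 *
 *	abd_t *abd = abd_get_from_buf(data, dn->dn_datablksz);
 *	if (dmu_lightweight_write_by_dnode(dn, off, abd, &zp, 0, tx) != 0)
 *		abd_free(abd);
 */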

/*
 * When possible directly assign passed loaned arc buffer to a dbuf.
 * If this is not possible copy the contents of passed arc buf via
 * dmu_write().
 */
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	objset_t *os = dn->dn_objset;
	uint64_t object = dn->dn_object;
	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
	uint64_t blkid;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, FTAG);
	if (db == NULL)
		return (SET_ERROR(EIO));
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * We can only assign if the offset is aligned and the arc buf is the
	 * same size as the dbuf.
	 */
	if (offset == db->db.db_offset && blksz == db->db.db_size) {
		zfs_racct_write(blksz, 1);
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		/* compressed bufs must always be assignable to their dbuf */
		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));

		dbuf_rele(db, FTAG);
		dmu_write(os, object, offset, blksz, buf->b_data, tx);
		dmu_return_arcbuf(buf);
	}

	return (0);
}

int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	int err;
	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;

	DB_DNODE_ENTER(dbuf);
	err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
	DB_DNODE_EXIT(dbuf);

	return (err);
}
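
/*
 * Example (illustrative sketch; "db", "off", "blksz" and "tx" are
 * placeholders): the loaned-buffer write path pairs
 * dmu_request_arcbuf() with dmu_assign_arcbuf_by_dbuf(), skipping a
 * copy when the buffer lines up with the target block. On failure the
 * loaned buffer is still owned by the caller and must be returned.
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db, blksz);
 *	... fill abuf->b_data with blksz bytes ...
 *	if (dmu_assign_arcbuf_by_dbuf(db, off, abuf, tx) != 0)
 *		dmu_return_arcbuf(abuf);
 */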
1519
34dc7c2f 1520typedef struct {
428870ff
BB
1521 dbuf_dirty_record_t *dsa_dr;
1522 dmu_sync_cb_t *dsa_done;
1523 zgd_t *dsa_zgd;
1524 dmu_tx_t *dsa_tx;
34dc7c2f
BB
1525} dmu_sync_arg_t;
1526
b128c09f
BB
1527static void
1528dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1529{
14e4e3cb 1530 (void) buf;
428870ff
BB
1531 dmu_sync_arg_t *dsa = varg;
1532 dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
b128c09f
BB
1533 blkptr_t *bp = zio->io_bp;
1534
428870ff
BB
1535 if (zio->io_error == 0) {
1536 if (BP_IS_HOLE(bp)) {
1537 /*
1538 * A block of zeros may compress to a hole, but the
1539 * block size still needs to be known for replay.
1540 */
1541 BP_SET_LSIZE(bp, db->db_size);
9b67f605 1542 } else if (!BP_IS_EMBEDDED(bp)) {
428870ff 1543 ASSERT(BP_GET_LEVEL(bp) == 0);
b5256303 1544 BP_SET_FILL(bp, 1);
428870ff 1545 }
b128c09f
BB
1546 }
1547}
1548
428870ff
BB
1549static void
1550dmu_sync_late_arrival_ready(zio_t *zio)
1551{
1552 dmu_sync_ready(zio, NULL, zio->io_private);
1553}
1554
34dc7c2f
BB
1555static void
1556dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1557{
14e4e3cb 1558 (void) buf;
428870ff
BB
1559 dmu_sync_arg_t *dsa = varg;
1560 dbuf_dirty_record_t *dr = dsa->dsa_dr;
34dc7c2f 1561 dmu_buf_impl_t *db = dr->dr_dbuf;
900d09b2
PS
1562 zgd_t *zgd = dsa->dsa_zgd;
1563
1564 /*
1565 * Record the vdev(s) backing this blkptr so they can be flushed after
1566 * the writes for the lwb have completed.
1567 */
1568 if (zio->io_error == 0) {
1569 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1570 }
34dc7c2f 1571
34dc7c2f
BB
1572 mutex_enter(&db->db_mtx);
1573 ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
428870ff 1574 if (zio->io_error == 0) {
03c6040b
GW
1575 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1576 if (dr->dt.dl.dr_nopwrite) {
02dc43bc
MA
1577 blkptr_t *bp = zio->io_bp;
1578 blkptr_t *bp_orig = &zio->io_bp_orig;
1579 uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
03c6040b
GW
1580
1581 ASSERT(BP_EQUAL(bp, bp_orig));
02dc43bc 1582 VERIFY(BP_EQUAL(bp, db->db_blkptr));
03c6040b 1583 ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
02dc43bc 1584 VERIFY(zio_checksum_table[chksum].ci_flags &
3c67d83a 1585 ZCHECKSUM_FLAG_NOPWRITE);
03c6040b 1586 }
428870ff
BB
1587 dr->dt.dl.dr_overridden_by = *zio->io_bp;
1588 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1589 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
a4069eef
PS
1590
1591 /*
1592 * Old style holes are filled with all zeros, whereas
1593 * new-style holes maintain their lsize, type, level,
1594 * and birth time (see zio_write_compress). While we
1595 * need to reset the BP_SET_LSIZE() call that happened
1596 * in dmu_sync_ready for old style holes, we do *not*
1597 * want to wipe out the information contained in new
1598 * style holes. Thus, only zero out the block pointer if
1599 * it's an old style hole.
1600 */
1601 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
1602 dr->dt.dl.dr_overridden_by.blk_birth == 0)
428870ff
BB
1603 BP_ZERO(&dr->dt.dl.dr_overridden_by);
1604 } else {
1605 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1606 }
34dc7c2f
BB
1607 cv_broadcast(&db->db_changed);
1608 mutex_exit(&db->db_mtx);
1609
428870ff 1610 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
34dc7c2f 1611
428870ff
BB
1612 kmem_free(dsa, sizeof (*dsa));
1613}
1614
1615static void
1616dmu_sync_late_arrival_done(zio_t *zio)
1617{
1618 blkptr_t *bp = zio->io_bp;
1619 dmu_sync_arg_t *dsa = zio->io_private;
900d09b2
PS
1620 zgd_t *zgd = dsa->dsa_zgd;
1621
1622 if (zio->io_error == 0) {
1623 /*
1624 * Record the vdev(s) backing this blkptr so they can be
1625 * flushed after the writes for the lwb have completed.
1626 */
1627 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1628
1629 if (!BP_IS_HOLE(bp)) {
2a8ba608 1630 blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
900d09b2
PS
1631 ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1632 ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1633 ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1634 ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1635 zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1636 }
428870ff
BB
1637 }
1638
1639 dmu_tx_commit(dsa->dsa_tx);
1640
1641 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1642
e2af2acc 1643 abd_free(zio->io_abd);
428870ff
BB
1644 kmem_free(dsa, sizeof (*dsa));
1645}
1646
1647static int
1648dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
5dbd68a3 1649 zio_prop_t *zp, zbookmark_phys_t *zb)
428870ff
BB
1650{
1651 dmu_sync_arg_t *dsa;
1652 dmu_tx_t *tx;
1653
1654 tx = dmu_tx_create(os);
1655 dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1656 if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1657 dmu_tx_abort(tx);
2e528b49
MA
1658 /* Make zl_get_data do txg_waited_synced() */
1659 return (SET_ERROR(EIO));
428870ff
BB
1660 }
1661
1ce23dca
PS
1662 /*
1663 * In order to prevent the zgd's lwb from being free'd prior to
1664 * dmu_sync_late_arrival_done() being called, we have to ensure
1665 * the lwb's "max txg" takes this tx's txg into account.
1666 */
1667 zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
1668
79c76d5b 1669 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
428870ff
BB
1670 dsa->dsa_dr = NULL;
1671 dsa->dsa_done = done;
1672 dsa->dsa_zgd = zgd;
1673 dsa->dsa_tx = tx;
1674
02dc43bc
MA
1675 /*
1676 * Since we are currently syncing this txg, it's nontrivial to
1677 * determine what BP to nopwrite against, so we disable nopwrite.
1678 *
1679 * When syncing, the db_blkptr is initially the BP of the previous
1680 * txg. We can not nopwrite against it because it will be changed
1681 * (this is similar to the non-late-arrival case where the dbuf is
1682 * dirty in a future txg).
1683 *
1684 * Then dbuf_write_ready() sets bp_blkptr to the location we will write.
1685 * We can not nopwrite against it because although the BP will not
1686 * (typically) be changed, the data has not yet been persisted to this
1687 * location.
1688 *
1689 * Finally, when dbuf_write_done() is called, it is theoretically
1690 * possible to always nopwrite, because the data that was written in
1691 * this txg is the same data that we are trying to write. However we
1692 * would need to check that this dbuf is not dirty in any future
1693 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1694 * don't nopwrite in this case.
1695 */
1696 zp->zp_nopwrite = B_FALSE;
1697
a6255b7f
DQ
1698 zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1699 abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1700 zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1701 dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1702 dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
428870ff
BB
1703
1704 return (0);
34dc7c2f
BB
1705}
1706
1707/*
1708 * Intent log support: sync the block associated with db to disk.
1709 * N.B. and XXX: the caller is responsible for making sure that the
1710 * data isn't changing while dmu_sync() is writing it.
1711 *
1712 * Return values:
1713 *
03c6040b 1714 * EEXIST: this txg has already been synced, so there's nothing to do.
34dc7c2f
BB
1715 * The caller should not log the write.
1716 *
1717 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1718 * The caller should not log the write.
1719 *
1720 * EALREADY: this block is already in the process of being synced.
1721 * The caller should track its progress (somehow).
1722 *
428870ff
BB
1723 * EIO: could not do the I/O.
1724 * The caller should do a txg_wait_synced().
34dc7c2f 1725 *
428870ff
BB
1726 * 0: the I/O has been initiated.
1727 * The caller should log this blkptr in the done callback.
1728 * It is possible that the I/O will fail, in which case
1729 * the error will be reported to the done callback and
1730 * propagated to pio from zio_done().
34dc7c2f
BB
1731 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr, *dr_next;
	dmu_sync_arg_t *dsa;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	dnode_t *dn;

	ASSERT(pio != NULL);
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us.  If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced.  There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EEXIST));
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = dbuf_find_dirty_eq(db, txg);

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're
		 * done.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	dr_next = list_next(&db->db_dirty_records, dr);
	ASSERT(dr_next == NULL || dr_next->dr_txg < txg);

	if (db->db_blkptr != NULL) {
		/*
		 * We need to fill in zgd_bp with the current blkptr so that
		 * the nopwrite code can check if we're writing the same
		 * data that's already on disk.  We can only nopwrite if we
		 * are sure that after making the copy, db_blkptr will not
		 * change until our i/o completes.  We ensure this by
		 * holding the db_mtx, and only allowing nopwrite if the
		 * block is not already dirty (see below).  This is verified
		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
		 * not changed.
		 */
		*zgd->zgd_bp = *db->db_blkptr;
	}

	/*
	 * Assume the on-disk data is X, the current syncing data (in
	 * txg - 1) is Y, and the current in-memory data is Z (currently
	 * in dmu_sync).
	 *
	 * We usually want to perform a nopwrite if X and Z are the
	 * same.  However, if Y is different (i.e. the BP is going to
	 * change before this write takes effect), then a nopwrite will
	 * be incorrect - we would override with X, which could have
	 * been freed when Y was written.
	 *
	 * (Note that this is not a concern when we are nop-writing from
	 * syncing context, because X and Y must be identical, because
	 * all previous txgs have been synced.)
	 *
	 * Therefore, we disable nopwrite if the current BP could change
	 * before this TXG.  There are two ways it could change: by
	 * being dirty (dr_next is non-NULL), or by being freed
	 * (dnode_block_freed()).  This behavior is verified by
	 * zio_done(), which VERIFYs that the override BP is identical
	 * to the on-disk BP.
	 */
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
		zp.zp_nopwrite = B_FALSE;
	DB_DNODE_EXIT(db);

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * We have already issued a sync write for this buffer,
		 * or this buffer has already been synced.  It could not
		 * have been dirtied since, or we would have cleared the
		 * state.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EALREADY));
	}

	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	mutex_exit(&db->db_mtx);

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = dr;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = NULL;

	zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp,
	    dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db), dbuf_is_l2cacheable(db),
	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));

	return (0);
}
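
/*
 * Illustrative sketch: one way a ZIL get_data-style caller might dispatch
 * on the dmu_sync() return values documented above.  This is not taken
 * from any particular consumer; example_done(), example_log_write(), and
 * the error handling are hypothetical, and the callback shape assumes
 * dmu_sync_cb_t takes the zgd and an error.
 */
#if 0
static void
example_done(zgd_t *zgd, int error)
{
	/* On success, zgd->zgd_bp holds the blkptr to log. */
}

static int
example_log_write(zio_t *pio, uint64_t txg, zgd_t *zgd)
{
	int error = dmu_sync(pio, txg, example_done, zgd);

	switch (error) {
	case 0:		/* I/O initiated; log the BP in example_done() */
	case EEXIST:	/* txg already synced; do not log the write */
	case ENOENT:	/* block was freed; do not log the write */
		return (0);
	case EALREADY:	/* a sync write is already in flight */
	case EIO:	/* caller should txg_wait_synced() instead */
	default:
		return (error);
	}
}
#endif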

int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_nlevels(dn, nlevels, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (0);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's checksum function.  This
	 * check ensures that the receiving system can understand the
	 * checksum function transmitted.
	 */
	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's compression function.  This
	 * check ensures that the receiving system can understand the
	 * compression function transmitted.
	 */
	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}
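
/*
 * Illustrative sketch: each setter above holds the dnode, updates the
 * in-core field, dirties the dnode in the caller's open transaction, and
 * drops the hold.  A hypothetical caller (the names "os" and "obj" and
 * the checksum choice are assumptions) might look like:
 */
#if 0
	dmu_tx_t *tx = dmu_tx_create(os);

	dmu_tx_hold_bonus(tx, obj);
	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
		dmu_object_set_checksum(os, obj, ZIO_CHECKSUM_FLETCHER_4, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}
#endif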

/*
 * When the "redundant_metadata" property is set to "most", only indirect
 * blocks of this level and higher will have an additional ditto block.
 */
static const int zfs_redundant_metadata_most_ditto_level = 2;

void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	uint8_t complevel = os->os_complevel;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup = B_FALSE;
	boolean_t nopwrite = B_FALSE;
	boolean_t dedup_verify = os->os_dedup_verify;
	boolean_t encrypt = B_FALSE;
	int copies = os->os_copies;

	/*
	 * We maintain different write policies for each of the following
	 * types of data:
	 * 1. metadata
	 * 2. preallocated blocks (i.e. level-0 blocks of a dump device)
	 * 3. all other level 0 blocks
	 */
	if (ismd) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		compress = zio_compress_select(os->os_spa,
		    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);

		/*
		 * Metadata always gets checksummed.  If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (!(zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_METADATA) ||
		    (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_EMBEDDED))
			checksum = ZIO_CHECKSUM_FLETCHER_4;

		switch (os->os_redundant_metadata) {
		case ZFS_REDUNDANT_METADATA_ALL:
			copies++;
			break;
		case ZFS_REDUNDANT_METADATA_MOST:
			if (level >= zfs_redundant_metadata_most_ditto_level ||
			    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))
				copies++;
			break;
		case ZFS_REDUNDANT_METADATA_SOME:
			if (DMU_OT_IS_CRITICAL(type))
				copies++;
			break;
		case ZFS_REDUNDANT_METADATA_NONE:
			break;
		}
	} else if (wp & WP_NOFILL) {
		ASSERT(level == 0);

		/*
		 * If we're writing preallocated blocks, we aren't actually
		 * writing them so don't set any policy properties.  These
		 * blocks are currently only used by an external subsystem
		 * outside of zfs (i.e. dump) and not written by the zio
		 * pipeline.
		 */
		compress = ZIO_COMPRESS_OFF;
		checksum = ZIO_CHECKSUM_OFF;
	} else {
		compress = zio_compress_select(os->os_spa, dn->dn_compress,
		    compress);
		complevel = zio_complevel_select(os->os_spa, compress,
		    complevel, complevel);

		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
		    zio_checksum_select(dn->dn_checksum, checksum) :
		    dedup_checksum;

		/*
		 * Determine dedup setting.  If we are in dmu_sync(),
		 * we won't actually dedup now because that's all
		 * done in syncing context; but we do want to use the
		 * dedup checksum.  If the checksum is not strong
		 * enough to ensure unique signatures, force
		 * dedup_verify.
		 */
		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
			if (!(zio_checksum_table[checksum].ci_flags &
			    ZCHECKSUM_FLAG_DEDUP))
				dedup_verify = B_TRUE;
		}

		/*
		 * Enable nopwrite if we have a secure enough checksum
		 * algorithm (see comment in zio_nop_write) and
		 * compression is enabled.  We don't enable nopwrite if
		 * dedup is enabled as the two features are mutually
		 * exclusive.
		 */
		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE) &&
		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
	}

	/*
	 * All objects in an encrypted objset are protected from modification
	 * via a MAC.  Encrypted objects store their IV and salt in the last
	 * DVA in the bp, so we cannot use all copies.  Encrypted objects are
	 * also not subject to nopwrite since writing the same data will still
	 * result in a new ciphertext.  Only encrypted blocks can be dedup'd
	 * to avoid ambiguity in the dedup code since the DDT does not store
	 * object types.
	 */
	if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
		encrypt = B_TRUE;

		if (DMU_OT_IS_ENCRYPTED(type)) {
			copies = MIN(copies, SPA_DVAS_PER_BP - 1);
			nopwrite = B_FALSE;
		} else {
			dedup = B_FALSE;
		}

		if (level <= 0 &&
		    (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
			compress = ZIO_COMPRESS_EMPTY;
		}
	}

	zp->zp_compress = compress;
	zp->zp_complevel = complevel;
	zp->zp_checksum = checksum;
	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
	zp->zp_nopwrite = nopwrite;
	zp->zp_encrypt = encrypt;
	zp->zp_byteorder = ZFS_HOST_BYTEORDER;
	memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
	memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
	memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
	zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
	    os->os_zpl_special_smallblock : 0;

	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
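
/*
 * Illustrative sketch: how a writer consumes dmu_write_policy(),
 * mirroring the dmu_sync() call above.  "os" and the held dbuf "db" are
 * assumed to exist in the caller.
 */
#if 0
	zio_prop_t zp;

	DB_DNODE_ENTER(db);
	dmu_write_policy(os, DB_DNODE(db), db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * zp now carries the effective checksum, compression and level,
	 * copies, dedup, nopwrite, and encryption settings for this write;
	 * zio_write() and arc_write() consume it as-is.
	 */
#endif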

/*
 * Reports the location of data and holes in an object.  In order to
 * accurately report holes, all dirty data must be synced to disk.  This
 * causes extremely poor performance when seeking for holes in a dirty
 * file.  As a compromise, only provide hole data when the dnode is clean.
 * When a dnode is dirty, report the dnode as having no holes by returning
 * EBUSY, which is always safe to do.
 */
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int restarted = 0, err;

restart:
	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dnode_is_dirty(dn)) {
		/*
		 * If the zfs_dmu_offset_next_sync module option is enabled
		 * then hole reporting has been requested.  Dirty dnodes
		 * must be synced to disk to accurately report holes.
		 *
		 * Provided a RL_READER rangelock spanning 0-UINT64_MAX is
		 * held by the caller only a single restart will be required.
		 * We tolerate callers which do not hold the rangelock by
		 * returning EBUSY and not reporting holes after one restart.
		 */
		if (zfs_dmu_offset_next_sync) {
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);

			if (restarted)
				return (SET_ERROR(EBUSY));

			txg_wait_synced(dmu_objset_pool(os), 0);
			restarted = 1;
			goto restart;
		}

		err = SET_ERROR(EBUSY);
	} else {
		err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	return (err);
}
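
/*
 * Illustrative sketch: mapping the data regions of an object with
 * dmu_offset_next(), in the spirit of lseek(SEEK_DATA)/lseek(SEEK_HOLE).
 * Assumes a clean dnode (a dirty one may return EBUSY, per the comment
 * above); "os" and "object" are hypothetical.
 */
#if 0
	uint64_t data = 0, hole;

	while (dmu_offset_next(os, object, B_FALSE, &data) == 0) {
		hole = data;
		if (dmu_offset_next(os, object, B_TRUE, &hole) != 0)
			break;
		/* [data, hole) is a contiguous data region. */
		data = hole;
	}
#endif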

int
dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
    blkptr_t *bps, size_t *nbpsp)
{
	dmu_buf_t **dbp, *dbuf;
	dmu_buf_impl_t *db;
	blkptr_t *bp;
	int error, numbufs;

	error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
	    &numbufs, &dbp);
	if (error != 0) {
		if (error == ESRCH) {
			error = SET_ERROR(ENXIO);
		}
		return (error);
	}

	ASSERT3U(numbufs, <=, *nbpsp);

	for (int i = 0; i < numbufs; i++) {
		dbuf = dbp[i];
		db = (dmu_buf_impl_t *)dbuf;

		mutex_enter(&db->db_mtx);

		if (!list_is_empty(&db->db_dirty_records)) {
			dbuf_dirty_record_t *dr;

			dr = list_head(&db->db_dirty_records);
			if (dr->dt.dl.dr_brtwrite) {
				/*
				 * This is a very special case where we clone
				 * a block and in the same transaction group
				 * we read its BP (most likely to clone the
				 * clone).
				 */
				bp = &dr->dt.dl.dr_overridden_by;
			} else {
				/*
				 * The block was modified in the same
				 * transaction group.
				 */
				mutex_exit(&db->db_mtx);
				error = SET_ERROR(EAGAIN);
				goto out;
			}
		} else {
			bp = db->db_blkptr;
		}

		mutex_exit(&db->db_mtx);

		if (bp == NULL) {
			/*
			 * The block was created in this transaction group,
			 * so it has no BP yet.
			 */
			error = SET_ERROR(EAGAIN);
			goto out;
		}
		/*
		 * Make sure we clone only data blocks.
		 */
		if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) {
			error = SET_ERROR(EINVAL);
			goto out;
		}

		bps[i] = *bp;
	}

	*nbpsp = numbufs;
out:
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (error);
}

int
dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
    dmu_tx_t *tx, const blkptr_t *bps, size_t nbps, boolean_t replay)
{
	spa_t *spa;
	dmu_buf_t **dbp, *dbuf;
	dmu_buf_impl_t *db;
	struct dirty_leaf *dl;
	dbuf_dirty_record_t *dr;
	const blkptr_t *bp;
	int error = 0, i, numbufs;

	spa = os->os_spa;

	VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
	    &numbufs, &dbp));
	ASSERT3U(nbps, ==, numbufs);

	/*
	 * Before we start cloning, make sure that the dbuf sizes match the
	 * new BPs' sizes.  If they don't, that's a no-go, as we are not
	 * able to shrink dbufs.
	 */
	for (i = 0; i < numbufs; i++) {
		dbuf = dbp[i];
		db = (dmu_buf_impl_t *)dbuf;
		bp = &bps[i];

		ASSERT0(db->db_level);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) {
			error = SET_ERROR(EXDEV);
			goto out;
		}
	}

	for (i = 0; i < numbufs; i++) {
		dbuf = dbp[i];
		db = (dmu_buf_impl_t *)dbuf;
		bp = &bps[i];

		ASSERT0(db->db_level);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(BP_IS_HOLE(bp) || dbuf->db_size == BP_GET_LSIZE(bp));

		dmu_buf_will_clone(dbuf, tx);

		mutex_enter(&db->db_mtx);

		dr = list_head(&db->db_dirty_records);
		VERIFY(dr != NULL);
		ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
		dl = &dr->dt.dl;
		dl->dr_overridden_by = *bp;
		dl->dr_brtwrite = B_TRUE;
		dl->dr_override_state = DR_OVERRIDDEN;
		if (BP_IS_HOLE(bp)) {
			dl->dr_overridden_by.blk_birth = 0;
			dl->dr_overridden_by.blk_phys_birth = 0;
		} else {
			dl->dr_overridden_by.blk_birth = dr->dr_txg;
			if (!BP_IS_EMBEDDED(bp)) {
				dl->dr_overridden_by.blk_phys_birth =
				    BP_PHYSICAL_BIRTH(bp);
			}
		}

		mutex_exit(&db->db_mtx);

		/*
		 * When data is embedded in the BP, there is no need to
		 * create a BRT entry, as there is no data block.  Just copy
		 * the BP as it contains the data.
		 * Also, when replaying ZIL we don't want to bump references
		 * in the BRT as it was already done during ZIL claim.
		 */
		if (!replay && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
			brt_pending_add(spa, bp, tx);
		}
	}
out:
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (error);
}
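
/*
 * Illustrative sketch: the two functions above form the core of block
 * cloning.  A simplified copy_file_range()-style flow, eliding the range
 * locking, length validation, and ZIL logging a real caller must perform;
 * EXAMPLE_MAXBPS and every other name here is hypothetical.  EAGAIN from
 * dmu_read_l0_bps() means the range was dirtied in the open txg; callers
 * typically wait for the txg to sync and retry.
 */
#if 0
	blkptr_t bps[EXAMPLE_MAXBPS];
	size_t nbps = EXAMPLE_MAXBPS;
	int error;

	/* Collect the source range's level-0 BPs... */
	error = dmu_read_l0_bps(srcos, srcobj, srcoff, len, bps, &nbps);
	if (error == 0) {
		/* ...and graft them onto the destination range. */
		error = dmu_brt_clone(dstos, dstobj, dstoff, len, tx,
		    bps, nbps, B_FALSE);
	}
#endif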

void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_nblkptr = dn->dn_nblkptr;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	__dmu_object_info_from_dnode(dn, doi);

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}
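
/*
 * Illustrative sketch: dmu_object_info() doubles as an existence check
 * when doi is NULL, per the comment above.  "os" and "object" are
 * hypothetical.
 */
#if 0
	dmu_object_info_t doi;

	if (dmu_object_info(os, object, NULL) == 0) {
		/* The object exists. */
	}
	if (dmu_object_info(os, object, &doi) == 0) {
		/* doi.doi_type, doi.doi_data_block_size, etc. are valid. */
	}
#endif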

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}

/*
 * Faster still when you only care about the size.
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add in number of slots used for the dnode itself */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
	DB_DNODE_EXIT(db);
}

void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
	DB_DNODE_EXIT(db);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}
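
/*
 * Illustrative sketch: these helpers swap fixed-width words in place
 * across a whole buffer, e.g. when interpreting data written by a
 * foreign-endian system.  The buffer contents are hypothetical.
 */
#if 0
	uint64_t words[4] = { 1, 2, 3, 4 };

	/* Each element becomes BSWAP_64(element); size must be 8-aligned. */
	byteswap_uint64_array(words, sizeof (words));
#endif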

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

void
byteswap_uint8_array(void *vbuf, size_t size)
{
	(void) vbuf, (void) size;
}

void
dmu_init(void)
{
	abd_init();
	zfs_dbgmsg_init();
	sa_cache_init();
	dmu_objset_init();
	dnode_init();
	zfetch_init();
	dmu_tx_init();
	l2arc_init();
	arc_init();
	dbuf_init();
}

void
dmu_fini(void)
{
	arc_fini(); /* arc depends on l2arc, so arc must go first */
	l2arc_fini();
	dmu_tx_fini();
	zfetch_fini();
	dbuf_fini();
	dnode_fini();
	dmu_objset_fini();
	sa_cache_fini();
	zfs_dbgmsg_fini();
	abd_fini();
}

EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);

ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
	"Enable NOP writes");

ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW,
	"Percentage of dirtied blocks from frees in one TXG");

ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
	"Enable forcing txg sync to find holes");

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
	"Limit one prefetch call to this size");