/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>
int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
int32_t ignore_hole_birth = 1;
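/*
 * Bookkeeping shared between the traversal thread and the data prefetch
 * thread.  pd_bytes_fetched counts bytes the prefetcher has issued that
 * the traversal has not yet consumed; pd_mtx and pd_cv synchronize the
 * two threads, while pd_cancel and pd_exited signal shutdown in each
 * direction.
 */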
typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
} prefetch_data_t;
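/*
 * Per-traversal state: the root being traversed, the visitor callback
 * and its argument, the optional resume bookmark, and the flags that
 * select pre/post visits, prefetching, and error handling.
 */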
typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;
static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}
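/*
 * zil_parse() record callback: only TX_WRITE records carry a block
 * pointer, and it is visited only if it was written before the log
 * was claimed.
 */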
static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}
typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}
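/*
 * Issue an async, no-wait ARC read for a metadata block so it is likely
 * cached by the time the traversal reaches it.
 */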
static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}
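/*
 * Holes and embedded block pointers carry no on-disk data to fetch;
 * intent-log blocks are also skipped, since they are handled by the
 * ZIL traversal instead.
 */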
static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}
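/*
 * Visit the block that bp points to and recurse into the indirect
 * blocks, dnodes, or objset below it as dictated by its level and
 * type.  This is the workhorse of the traversal.
 */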
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
		break;
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated.  However, we (and
		 * our callers) can not necessarily tell when an object was
		 * allocated.  Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it.  We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if (!ignore_hole_birth && (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}
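	/*
	 * Wait until the prefetch thread has fetched at least this
	 * block's worth of data, then consume that credit.  Together
	 * with the matching loop in traverse_prefetcher(), this keeps
	 * the prefetcher no more than zfs_pd_bytes_max bytes ahead.
	 */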
	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}
	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}
	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td,
			    &((blkptr_t *)buf->b_data)[i], czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *cdnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		cdnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &cdnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &cdnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		dnode_phys_t *mdnp, *gdnp, *udnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		mdnp = &osp->os_meta_dnode;
		gdnp = &osp->os_groupused_dnode;
		udnp = &osp->os_userused_dnode;

		prefetch_dnode_metadata(td, mdnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, gdnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, udnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, mdnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, gdnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, udnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit.  This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid <<
		    (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));
		td->td_paused = B_TRUE;
	}

	return (err);
}
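/*
 * Prefetch a dnode's top-level block pointers, plus its spill block
 * if it has one.
 */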
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}
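/*
 * Visit each of a dnode's block pointers, then its spill block if
 * present, stopping at the first error.
 */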
static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
	}
	return (err);
}
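/*
 * Traversal callback used by the prefetch thread: issue speculative
 * ARC reads, throttling so that at most zfs_pd_bytes_max bytes of
 * prefetched data are outstanding ahead of the main traversal.
 */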
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}
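/*
 * Body of the prefetch taskq thread: re-run the traversal from the
 * root with traverse_prefetcher() as the callback, then signal
 * completion via pd_exited.
 */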
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}
/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	/*
	 * The data prefetching mechanism (the prefetch thread) is incompatible
	 * with resuming from a bookmark.
	 */
	ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td->td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, czb);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE))
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);

	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}
/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, NULL, flags, func, arg));
}
int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

module_param(zfs_pd_bytes_max, int, 0644);
MODULE_PARM_DESC(zfs_pd_bytes_max, "Max number of bytes to prefetch");

module_param(ignore_hole_birth, int, 0644);
MODULE_PARM_DESC(ignore_hole_birth, "Ignore hole_birth txg for send");
#endif