/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
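
/*
 * Maximum number of blocks the prefetch thread may fetch ahead of the main
 * traversal; exposed below as the zfs_pd_blks_max module parameter.
 */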
int zfs_pd_blks_max = 100;
typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int pd_blks_max;
	int pd_blks_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
} prefetch_data_t;
typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	blkptr_cb_t *td_func;
	void *td_arg;
} traverse_data_t;
#define	TD_HARD(td)	(td->td_flags & TRAVERSE_HARD)
static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_t zb;

	if (bp->blk_birth == 0)
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}
static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth == 0)
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}
typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		ASSERT3U(zb->zb_object, <=, td->td_resume->zb_object);
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}
static void
traverse_pause(traverse_data_t *td, const zbookmark_t *zb)
{
	ASSERT(td->td_resume != NULL);
	ASSERT0(zb->zb_level);
	bcopy(zb, td->td_resume, sizeof (*td->td_resume));
}
static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	int err = 0, lasterr = 0;
	arc_buf_t *buf = NULL;
	boolean_t pause = B_FALSE;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, NULL, zb, dnp, td->td_arg);
		return (err);
	}

	if (bp->blk_birth <= td->td_min_txg)
		return (0);

	if (td->td_pfd && !td->td_pfd->pd_exited &&
	    ((td->td_pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
	    BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0)) {
		mutex_enter(&td->td_pfd->pd_mtx);
		ASSERT(td->td_pfd->pd_blks_fetched >= 0);
		while (td->td_pfd->pd_blks_fetched == 0 &&
		    !td->td_pfd->pd_exited)
			cv_wait(&td->td_pfd->pd_cv, &td->td_pfd->pd_mtx);
		td->td_pfd->pd_blks_fetched--;
		cv_broadcast(&td->td_pfd->pd_cv);
		mutex_exit(&td->td_pfd->pd_mtx);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err == ERESTART)
			pause = B_TRUE; /* handle pausing at a common point */
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_t *czb;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			return (err);

		czb = kmem_alloc(sizeof (zbookmark_t), KM_PUSHPAGE);

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td,
			    &((blkptr_t *)buf->b_data)[i], czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0) {
				if (!TD_HARD(td))
					break;
				lasterr = err;
			}
		}

		kmem_free(czb, sizeof (zbookmark_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			return (err);
		dnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &dnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &dnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
			if (err != 0) {
				if (!TD_HARD(td))
					break;
				lasterr = err;
			}
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		dnp = &osp->os_meta_dnode;
		prefetch_dnode_metadata(td, dnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, dnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err && TD_HARD(td)) {
			lasterr = err;
			err = 0;
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_userused_dnode;
			err = traverse_dnode(td, dnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
		if (err && TD_HARD(td)) {
			lasterr = err;
			err = 0;
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_groupused_dnode;
			err = traverse_dnode(td, dnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err == ERESTART)
			pause = B_TRUE;
	}

	if (pause && td->td_resume != NULL) {
		ASSERT3U(err, ==, ERESTART);
		ASSERT(!TD_HARD(td));
		traverse_pause(td, zb);
	}

	return (err != 0 ? err : lasterr);
}
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}
static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0, lasterr = 0;
	zbookmark_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0) {
			if (!TD_HARD(td))
				break;
			lasterr = err;
		}
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
		if (err != 0) {
			if (!TD_HARD(td))
				return (err);
			lasterr = err;
		}
	}
	return (err != 0 ? err : lasterr);
}
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;

	ASSERT(pfd->pd_blks_fetched >= 0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL || !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
	    BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
		cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_blks_fetched++;
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}
/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	/*
	 * The data prefetching mechanism (the prefetch thread) is incompatible
	 * with resuming from a bookmark.
	 */
	ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));

	td = kmem_alloc(sizeof (traverse_data_t), KM_PUSHPAGE);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_PUSHPAGE);
	czb = kmem_alloc(sizeof (zbookmark_t), KM_PUSHPAGE);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;

	pd->pd_blks_max = zfs_pd_blks_max;
	pd->pd_flags = flags;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !dsl_dataset_is_snapshot(ds) && !BP_IS_HOLE(rootbp)) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td->td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, czb);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE))
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);

	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}
/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &ds->ds_phys->ds_bp, txg_start, NULL, flags, func, arg));
}
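
/*
 * Example (illustrative sketch only): a minimal traverse_dataset() caller.
 * The callback signature mirrors traverse_prefetcher() above; the names
 * count_blocks_cb() and count_dataset_blocks() are hypothetical.
 */
#if 0
static int
count_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *count = arg;

	/* holes are visited with bp == NULL */
	if (bp != NULL)
		(*count)++;
	return (0);
}

static int
count_dataset_blocks(dsl_dataset_t *ds, uint64_t *countp)
{
	*countp = 0;
	/* visit parents before children, starting from txg 0 */
	return (traverse_dataset(ds, 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, count_blocks_cb,
	    countp));
}
#endif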
int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
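
/*
 * Illustrative sketch of the resume protocol used with
 * traverse_dataset_destroyed(): a callback that returns ERESTART causes the
 * traversal to pause and record its position in the caller-supplied bookmark
 * (see traverse_pause() above); passing the same bookmark back in resumes
 * where it left off.  The name destroy_traverse_sketch() is hypothetical and
 * the loop structure is an assumption, not how any particular caller works.
 */
#if 0
static int
destroy_traverse_sketch(spa_t *spa, blkptr_t *rootbp, blkptr_cb_t func,
    void *arg)
{
	zbookmark_t resume = { 0 };
	int err;

	do {
		/* TRAVERSE_HARD must not be set when pausing is expected */
		err = traverse_dataset_destroyed(spa, rootbp, 0, &resume,
		    TRAVERSE_POST, func, arg);
	} while (err == ERESTART);

	return (err);
}
#endif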
/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err, lasterr = 0;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (obj = 1; err == 0 || (err != ESRCH && hard);
	    err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (!hard)
				return (err);
			lasterr = err;
			continue;
		}

		if (doi.doi_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (!hard)
					return (err);
				lasterr = err;
				continue;
			}
			if (ds->ds_phys->ds_prev_snap_txg > txg)
				txg = ds->ds_phys->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0) {
				if (!hard)
					return (err);
				lasterr = err;
			}
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err != 0 ? err : lasterr);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

module_param(zfs_pd_blks_max, int, 0644);
MODULE_PARM_DESC(zfs_pd_blks_max, "Max number of blocks to prefetch");
#endif