/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
} prefetch_data_t;
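
/*
 * Flow control between the prefetch thread and the main traversal:
 * traverse_prefetcher() issues speculative arc_read()s and adds each
 * block's logical size to pd_bytes_fetched, sleeping on pd_cv once
 * zfs_pd_bytes_max bytes are in flight; traverse_visitbp() waits until
 * the prefetcher is at least one block ahead, then subtracts the size
 * of each block it visits and wakes the prefetcher. pd_cancel and
 * pd_exited coordinate shutdown of the prefetch thread.
 */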

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	blkptr_cb_t *td_func;
	void *td_arg;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}

static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		if (spa_feature_is_active(td->td_spa, SPA_FEATURE_HOLE_BIRTH)) {
			/*
			 * Since this block has a birth time of 0 it must be a
			 * hole created before the SPA_FEATURE_HOLE_BIRTH
			 * feature was enabled. If SPA_FEATURE_HOLE_BIRTH
			 * was enabled before the min_txg for this traversal
			 * we know the hole must have been created before the
			 * min_txg for this traversal, so we can skip it. If
			 * SPA_FEATURE_HOLE_BIRTH was enabled after the min_txg
			 * for this traversal we cannot tell if the hole was
			 * created before or after the min_txg for this
			 * traversal, so we cannot skip it.
			 */
			uint64_t hole_birth_enabled_txg;
			VERIFY(spa_feature_enabled_txg(td->td_spa,
			    SPA_FEATURE_HOLE_BIRTH, &hole_birth_enabled_txg));
			if (hole_birth_enabled_txg < td->td_min_txg)
				return (0);
		}
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_interruptible(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td,
			    &((blkptr_t *)buf->b_data)[i], czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		dnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &dnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &dnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		dnode_phys_t *dnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		dnp = &osp->os_meta_dnode;
		prefetch_dnode_metadata(td, dnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, dnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_groupused_dnode;
			err = traverse_dnode(td, dnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_userused_dnode;
			err = traverse_dnode(td, dnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
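		 * For example, with 128K indirect blocks (dn_indblkshift of
		 * 17 and SPA_BLKPTRSHIFT of 7) each indirect level covers
		 * 1024 children, so a level-2 bookmark's blkid is shifted
		 * left by 20 bits here.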
		 */
		td->td_resume->zb_blkid = zb->zb_blkid <<
		    (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
	}
	return (err);
}

/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_interruptible(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}

static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	/*
	 * The data prefetching mechanism (the prefetch thread) is incompatible
	 * with resuming from a bookmark.
	 */
	ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;

	pd->pd_flags = flags;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !dsl_dataset_is_snapshot(ds) && !BP_IS_HOLE(rootbp)) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td->td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, czb);
		if (err != 0) {
			/*
			 * Returning directly here would leak td, pd, and
			 * czb; tear down everything set up above first.
			 */
			mutex_destroy(&pd->pd_mtx);
			cv_destroy(&pd->pd_cv);
			kmem_free(czb, sizeof (zbookmark_phys_t));
			kmem_free(pd, sizeof (struct prefetch_data));
			kmem_free(td, sizeof (struct traverse_data));
			return (err);
		}

		osp = buf->b_data;
		traverse_zil(td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE))
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_interruptible(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);

	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &ds->ds_phys->ds_bp, txg_start, NULL, flags, func, arg));
}
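
/*
 * Illustrative sketch (not part of the original file): a minimal
 * blkptr_cb_t callback that counts the allocated (non-hole) blocks in
 * a dataset, walked in pre order with data prefetching. The callback
 * and counter names here are hypothetical.
 *
 *	static int
 *	count_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *countp = arg;
 *
 *		if (bp != NULL && !BP_IS_HOLE(bp))
 *			(*countp)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = traverse_dataset(ds, 0, TRAVERSE_PRE |
 *	    TRAVERSE_PREFETCH_DATA, count_blocks_cb, &count);
 */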

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (ds->ds_phys->ds_prev_snap_txg > txg)
				txg = ds->ds_phys->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

module_param(zfs_pd_bytes_max, int, 0644);
MODULE_PARM_DESC(zfs_pd_bytes_max, "Max number of bytes to prefetch");
#endif