1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
24 */
25
26 #include <sys/zfs_context.h>
27 #include <sys/dmu_objset.h>
28 #include <sys/dmu_traverse.h>
29 #include <sys/dsl_dataset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_pool.h>
32 #include <sys/dnode.h>
33 #include <sys/spa.h>
34 #include <sys/zio.h>
35 #include <sys/dmu_impl.h>
36 #include <sys/sa.h>
37 #include <sys/sa_impl.h>
38 #include <sys/callb.h>
39 #include <sys/zfeature.h>
40
41 int zfs_pd_blks_max = 100;
42
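/*
 * State shared between the main traversal and the optional data-prefetch
 * thread.  traverse_prefetcher() (running in the prefetch thread) issues
 * speculative arc_read()s ahead of the main walk, incrementing
 * pd_blks_fetched and stalling once pd_blks_max reads are outstanding;
 * traverse_visitbp() waits for a prefetched block and decrements the
 * count before reading it itself.  pd_mtx/pd_cv implement the handshake,
 * and pd_cancel/pd_exited coordinate shutdown of the prefetch thread.
 */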
43 typedef struct prefetch_data {
44 kmutex_t pd_mtx;
45 kcondvar_t pd_cv;
46 int pd_blks_max;
47 int pd_blks_fetched;
48 int pd_flags;
49 boolean_t pd_cancel;
50 boolean_t pd_exited;
51 } prefetch_data_t;
52
53 typedef struct traverse_data {
54 spa_t *td_spa;
55 uint64_t td_objset;
56 blkptr_t *td_rootbp;
57 uint64_t td_min_txg;
58 zbookmark_phys_t *td_resume;
59 int td_flags;
60 prefetch_data_t *td_pfd;
61 boolean_t td_paused;
62 blkptr_cb_t *td_func;
63 void *td_arg;
64 } traverse_data_t;
65
66 static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
67 uint64_t objset, uint64_t object);
68 static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
69 uint64_t objset, uint64_t object);
70
71 static int
72 traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
73 {
74 traverse_data_t *td = arg;
75 zbookmark_phys_t zb;
76
77 if (BP_IS_HOLE(bp))
78 return (0);
79
80 if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
81 return (0);
82
83 SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
84 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
85
86 (void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);
87
88 return (0);
89 }
90
91 static int
92 traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
93 {
94 traverse_data_t *td = arg;
95
96 if (lrc->lrc_txtype == TX_WRITE) {
97 lr_write_t *lr = (lr_write_t *)lrc;
98 blkptr_t *bp = &lr->lr_blkptr;
99 zbookmark_phys_t zb;
100
101 if (BP_IS_HOLE(bp))
102 return (0);
103
104 if (claim_txg == 0 || bp->blk_birth < claim_txg)
105 return (0);
106
107 SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
108 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
109
110 (void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
111 td->td_arg);
112 }
113 return (0);
114 }
115
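/*
 * Walk a dataset's intent log: traverse_zil_block() visits the log blocks
 * themselves and traverse_zil_record() visits the block pointers embedded
 * in TX_WRITE records.  Both honor claim_txg (see the comment below) so
 * that only claimed-but-not-yet-replayed log blocks are visited.
 */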
116 static void
117 traverse_zil(traverse_data_t *td, zil_header_t *zh)
118 {
119 uint64_t claim_txg = zh->zh_claim_txg;
120 zilog_t *zilog;
121
122 /*
123 * We only want to visit blocks that have been claimed but not yet
124 * replayed; plus, in read-only mode, blocks that are already stable.
125 */
126 if (claim_txg == 0 && spa_writeable(td->td_spa))
127 return;
128
129 zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
130
131 (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
132 claim_txg);
133
134 zil_free(zilog);
135 }
136
137 typedef enum resume_skip {
138 RESUME_SKIP_ALL,
139 RESUME_SKIP_NONE,
140 RESUME_SKIP_CHILDREN
141 } resume_skip_t;
142
143 /*
144 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
145 * the block indicated by zb does not need to be visited at all. Returns
146 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
147 * resume point. This indicates that this block should be visited but not its
148 * children (since they must have been visited in a previous traversal).
149 * Otherwise returns RESUME_SKIP_NONE.
150 */
151 static resume_skip_t
152 resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
153 const zbookmark_phys_t *zb)
154 {
155 if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
156 /*
157 * If we already visited this bp & everything below,
158 * don't bother doing it again.
159 */
160 if (zbookmark_is_before(dnp, zb, td->td_resume))
161 return (RESUME_SKIP_ALL);
162
163 /*
164 * If we found the block we're trying to resume from, zero
165 * the bookmark out to indicate that we have resumed.
166 */
167 if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
168 bzero(td->td_resume, sizeof (*zb));
169 if (td->td_flags & TRAVERSE_POST)
170 return (RESUME_SKIP_CHILDREN);
171 }
172 }
173 return (RESUME_SKIP_NONE);
174 }
175
176 static void
177 traverse_prefetch_metadata(traverse_data_t *td,
178 const blkptr_t *bp, const zbookmark_phys_t *zb)
179 {
180 uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;
181
182 if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
183 return;
184 /*
185 * If we are in the process of resuming, don't prefetch, because
186 * some children will not be needed (and in fact may have already
187 * been freed).
188 */
189 if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
190 return;
191 if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
192 return;
193 if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
194 return;
195
196 (void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
197 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
198 }
199
200 static boolean_t
201 prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
202 {
203 ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
204 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
205 BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
206 return (B_FALSE);
207 return (B_TRUE);
208 }
209
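/*
 * Visit one block pointer and, recursively, everything beneath it.
 * Handles resume bookmarks, hole and birth-txg pruning, throttling
 * against the prefetch thread, and the TRAVERSE_PRE/TRAVERSE_POST
 * callbacks.  Indirect blocks, dnode blocks (DMU_OT_DNODE) and objset
 * blocks (DMU_OT_OBJSET) are read synchronously (ARC_WAIT) and their
 * children visited in bookmark order; if the traversal stops with an
 * error and td_resume is set, the bookmark of the first unvisited
 * level-0 block is recorded so the walk can be resumed later.
 */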
210 static int
211 traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
212 const blkptr_t *bp, const zbookmark_phys_t *zb)
213 {
214 int err = 0;
215 arc_buf_t *buf = NULL;
216
217 switch (resume_skip_check(td, dnp, zb)) {
218 case RESUME_SKIP_ALL:
219 return (0);
220 case RESUME_SKIP_CHILDREN:
221 goto post;
222 case RESUME_SKIP_NONE:
223 break;
224 default:
225 ASSERT(0);
226 }
227
228 if (bp->blk_birth == 0) {
229 if (spa_feature_is_active(td->td_spa, SPA_FEATURE_HOLE_BIRTH)) {
230 /*
231 * Since this block has a birth time of 0 it must be a
232 * hole created before the SPA_FEATURE_HOLE_BIRTH
233 * feature was enabled. If SPA_FEATURE_HOLE_BIRTH
234 * was enabled before the min_txg for this traversal, we
235 * know the hole must have been created before the
236 * min_txg for this traversal, so we can skip it. If
237 * SPA_FEATURE_HOLE_BIRTH was enabled after the min_txg
238 * for this traversal, we cannot tell if the hole was
239 * created before or after the min_txg for this
240 * traversal, so we cannot skip it.
241 */
242 uint64_t hole_birth_enabled_txg;
243 VERIFY(spa_feature_enabled_txg(td->td_spa,
244 SPA_FEATURE_HOLE_BIRTH, &hole_birth_enabled_txg));
245 if (hole_birth_enabled_txg < td->td_min_txg)
246 return (0);
247 }
248 } else if (bp->blk_birth <= td->td_min_txg) {
249 return (0);
250 }
251
252 if (td->td_pfd != NULL && !td->td_pfd->pd_exited &&
253 prefetch_needed(td->td_pfd, bp)) {
254 mutex_enter(&td->td_pfd->pd_mtx);
255 ASSERT(td->td_pfd->pd_blks_fetched >= 0);
256 while (td->td_pfd->pd_blks_fetched == 0 &&
257 !td->td_pfd->pd_exited)
258 cv_wait(&td->td_pfd->pd_cv, &td->td_pfd->pd_mtx);
259 td->td_pfd->pd_blks_fetched--;
260 cv_broadcast(&td->td_pfd->pd_cv);
261 mutex_exit(&td->td_pfd->pd_mtx);
262 }
263
264 if (BP_IS_HOLE(bp)) {
265 err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
266 if (err != 0)
267 goto post;
268 return (0);
269 }
270
271 if (td->td_flags & TRAVERSE_PRE) {
272 err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
273 td->td_arg);
274 if (err == TRAVERSE_VISIT_NO_CHILDREN)
275 return (0);
276 if (err != 0)
277 goto post;
278 }
279
280 if (BP_GET_LEVEL(bp) > 0) {
281 uint32_t flags = ARC_WAIT;
282 int32_t i;
283 int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
284 zbookmark_phys_t *czb;
285
286 err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
287 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
288 if (err != 0)
289 goto post;
290
291 czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);
292
293 for (i = 0; i < epb; i++) {
294 SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
295 zb->zb_level - 1,
296 zb->zb_blkid * epb + i);
297 traverse_prefetch_metadata(td,
298 &((blkptr_t *)buf->b_data)[i], czb);
299 }
300
301 /* recursively visitbp() blocks below this */
302 for (i = 0; i < epb; i++) {
303 SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
304 zb->zb_level - 1,
305 zb->zb_blkid * epb + i);
306 err = traverse_visitbp(td, dnp,
307 &((blkptr_t *)buf->b_data)[i], czb);
308 if (err != 0)
309 break;
310 }
311
312 kmem_free(czb, sizeof (zbookmark_phys_t));
313
314 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
315 uint32_t flags = ARC_WAIT;
316 int32_t i;
317 int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
318
319 err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
320 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
321 if (err != 0)
322 goto post;
323 dnp = buf->b_data;
324
325 for (i = 0; i < epb; i++) {
326 prefetch_dnode_metadata(td, &dnp[i], zb->zb_objset,
327 zb->zb_blkid * epb + i);
328 }
329
330 /* recursively visitbp() blocks below this */
331 for (i = 0; i < epb; i++) {
332 err = traverse_dnode(td, &dnp[i], zb->zb_objset,
333 zb->zb_blkid * epb + i);
334 if (err != 0)
335 break;
336 }
337 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
338 uint32_t flags = ARC_WAIT;
339 objset_phys_t *osp;
340 dnode_phys_t *dnp;
341
342 err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
343 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
344 if (err != 0)
345 goto post;
346
347 osp = buf->b_data;
348 dnp = &osp->os_meta_dnode;
349 prefetch_dnode_metadata(td, dnp, zb->zb_objset,
350 DMU_META_DNODE_OBJECT);
351 if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
352 prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
353 zb->zb_objset, DMU_GROUPUSED_OBJECT);
354 prefetch_dnode_metadata(td, &osp->os_userused_dnode,
355 zb->zb_objset, DMU_USERUSED_OBJECT);
356 }
357
358 err = traverse_dnode(td, dnp, zb->zb_objset,
359 DMU_META_DNODE_OBJECT);
360 if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
361 dnp = &osp->os_groupused_dnode;
362 err = traverse_dnode(td, dnp, zb->zb_objset,
363 DMU_GROUPUSED_OBJECT);
364 }
365 if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
366 dnp = &osp->os_userused_dnode;
367 err = traverse_dnode(td, dnp, zb->zb_objset,
368 DMU_USERUSED_OBJECT);
369 }
370 }
371
372 if (buf)
373 (void) arc_buf_remove_ref(buf, &buf);
374
375 post:
376 if (err == 0 && (td->td_flags & TRAVERSE_POST))
377 err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
378
379 if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
380 /*
381 * Ignore this disk error as requested by the HARD flag,
382 * and continue traversal.
383 */
384 err = 0;
385 }
386
387 /*
388 * If we are stopping here, set td_resume.
389 */
390 if (td->td_resume != NULL && err != 0 && !td->td_paused) {
391 td->td_resume->zb_objset = zb->zb_objset;
392 td->td_resume->zb_object = zb->zb_object;
393 td->td_resume->zb_level = 0;
394 /*
395 * If we have stopped on an indirect block (e.g. due to
396 * i/o error), we have not visited anything below it.
397 * Set the bookmark to the first level-0 block that we need
398 * to visit. This way, the resuming code does not need to
399 * deal with resuming from indirect blocks.
400 */
401 td->td_resume->zb_blkid = zb->zb_blkid <<
402 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));
403 td->td_paused = B_TRUE;
404 }
405
406 return (err);
407 }
408
409 static void
410 prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
411 uint64_t objset, uint64_t object)
412 {
413 int j;
414 zbookmark_phys_t czb;
415
416 for (j = 0; j < dnp->dn_nblkptr; j++) {
417 SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
418 traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
419 }
420
421 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
422 SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
423 traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
424 }
425 }
426
427 static int
428 traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
429 uint64_t objset, uint64_t object)
430 {
431 int j, err = 0;
432 zbookmark_phys_t czb;
433
434 for (j = 0; j < dnp->dn_nblkptr; j++) {
435 SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
436 err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
437 if (err != 0)
438 break;
439 }
440
441 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
442 SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
443 err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
444 }
445 return (err);
446 }
447
448 /* ARGSUSED */
449 static int
450 traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
451 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
452 {
453 prefetch_data_t *pfd = arg;
454 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
455
456 ASSERT(pfd->pd_blks_fetched >= 0);
457 if (pfd->pd_cancel)
458 return (SET_ERROR(EINTR));
459
460 if (!prefetch_needed(pfd, bp))
461 return (0);
462
463 mutex_enter(&pfd->pd_mtx);
464 while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
465 cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
466 pfd->pd_blks_fetched++;
467 cv_broadcast(&pfd->pd_cv);
468 mutex_exit(&pfd->pd_mtx);
469
470 (void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
471 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);
472
473 return (0);
474 }
475
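/*
 * Prefetch thread body: repeat the traversal with traverse_prefetcher()
 * as the callback, then signal pd_exited so traverse_impl() can finish
 * tearing down the prefetch state.
 */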
476 static void
477 traverse_prefetch_thread(void *arg)
478 {
479 traverse_data_t *td_main = arg;
480 traverse_data_t td = *td_main;
481 zbookmark_phys_t czb;
482
483 td.td_func = traverse_prefetcher;
484 td.td_arg = td_main->td_pfd;
485 td.td_pfd = NULL;
486
487 SET_BOOKMARK(&czb, td.td_objset,
488 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
489 (void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);
490
491 mutex_enter(&td_main->td_pfd->pd_mtx);
492 td_main->td_pfd->pd_exited = B_TRUE;
493 cv_broadcast(&td_main->td_pfd->pd_cv);
494 mutex_exit(&td_main->td_pfd->pd_mtx);
495 }
496
497 /*
498 * NB: dataset must not be changing on-disk (e.g., it is a snapshot or we are
499 * in syncing context).
500 */
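/*
 * Common implementation behind traverse_dataset(), traverse_dataset_destroyed()
 * and traverse_pool(): set up the traversal and prefetch state, walk the ZIL
 * when traversing a live (non-snapshot) dataset, dispatch the prefetch thread
 * if TRAVERSE_PREFETCH_DATA was requested, recurse from the root block
 * pointer, and finally wait for the prefetch thread before tearing down.
 */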
501 static int
502 traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
503 uint64_t txg_start, zbookmark_phys_t *resume, int flags,
504 blkptr_cb_t func, void *arg)
505 {
506 traverse_data_t *td;
507 prefetch_data_t *pd;
508 zbookmark_phys_t *czb;
509 int err;
510
511 ASSERT(ds == NULL || objset == ds->ds_object);
512 ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));
513
514 /*
515 * The data prefetching mechanism (the prefetch thread) is incompatible
516 * with resuming from a bookmark.
517 */
518 ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));
519
520 td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
521 pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
522 czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);
523
524 td->td_spa = spa;
525 td->td_objset = objset;
526 td->td_rootbp = rootbp;
527 td->td_min_txg = txg_start;
528 td->td_resume = resume;
529 td->td_func = func;
530 td->td_arg = arg;
531 td->td_pfd = pd;
532 td->td_flags = flags;
533 td->td_paused = B_FALSE;
534
535 pd->pd_blks_max = zfs_pd_blks_max;
536 pd->pd_flags = flags;
537 mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
538 cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);
539
540 SET_BOOKMARK(czb, td->td_objset,
541 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
542
543 /* See comment on ZIL traversal in dsl_scan_visitds. */
544 if (ds != NULL && !dsl_dataset_is_snapshot(ds) && !BP_IS_HOLE(rootbp)) {
545 uint32_t flags = ARC_WAIT;
546 objset_phys_t *osp;
547 arc_buf_t *buf;
548
549 err = arc_read(NULL, td->td_spa, rootbp,
550 arc_getbuf_func, &buf,
551 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, czb);
		if (err != 0) {
			/* Undo the setup above; prefetch thread not started. */
			mutex_destroy(&pd->pd_mtx);
			cv_destroy(&pd->pd_cv);
			kmem_free(czb, sizeof (zbookmark_phys_t));
			kmem_free(pd, sizeof (struct prefetch_data));
			kmem_free(td, sizeof (struct traverse_data));
			return (err);
		}
554
555 osp = buf->b_data;
556 traverse_zil(td, &osp->os_zil_header);
557 (void) arc_buf_remove_ref(buf, &buf);
558 }
559
560 if (!(flags & TRAVERSE_PREFETCH_DATA) ||
561 0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
562 td, TQ_NOQUEUE))
563 pd->pd_exited = B_TRUE;
564
565 err = traverse_visitbp(td, NULL, rootbp, czb);
566
567 mutex_enter(&pd->pd_mtx);
568 pd->pd_cancel = B_TRUE;
569 cv_broadcast(&pd->pd_cv);
570 while (!pd->pd_exited)
571 cv_wait(&pd->pd_cv, &pd->pd_mtx);
572 mutex_exit(&pd->pd_mtx);
573
574 mutex_destroy(&pd->pd_mtx);
575 cv_destroy(&pd->pd_cv);
576
577 kmem_free(czb, sizeof (zbookmark_phys_t));
578 kmem_free(pd, sizeof (struct prefetch_data));
579 kmem_free(td, sizeof (struct traverse_data));
580
581 return (err);
582 }
583
584 /*
585 * NB: dataset must not be changing on-disk (e.g., it is a snapshot or we are
586 * in syncing context).
587 */
588 int
589 traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
590 blkptr_cb_t func, void *arg)
591 {
592 return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
593 &ds->ds_phys->ds_bp, txg_start, NULL, flags, func, arg));
594 }
595
596 int
597 traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
598 uint64_t txg_start, zbookmark_phys_t *resume, int flags,
599 blkptr_cb_t func, void *arg)
600 {
601 return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
602 blkptr, txg_start, resume, flags, func, arg));
603 }
604
605 /*
606 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
607 */
608 int
609 traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
610 blkptr_cb_t func, void *arg)
611 {
612 int err;
613 uint64_t obj;
614 dsl_pool_t *dp = spa_get_dsl(spa);
615 objset_t *mos = dp->dp_meta_objset;
616 boolean_t hard = (flags & TRAVERSE_HARD);
617
618 /* visit the MOS */
619 err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
620 txg_start, NULL, flags, func, arg);
621 if (err != 0)
622 return (err);
623
624 /* visit each dataset */
625 for (obj = 1; err == 0;
626 err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
627 dmu_object_info_t doi;
628
629 err = dmu_object_info(mos, obj, &doi);
630 if (err != 0) {
631 if (hard)
632 continue;
633 break;
634 }
635
636 if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
637 dsl_dataset_t *ds;
638 uint64_t txg = txg_start;
639
640 dsl_pool_config_enter(dp, FTAG);
641 err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
642 dsl_pool_config_exit(dp, FTAG);
643 if (err != 0) {
644 if (hard)
645 continue;
646 break;
647 }
648 if (ds->ds_phys->ds_prev_snap_txg > txg)
649 txg = ds->ds_phys->ds_prev_snap_txg;
650 err = traverse_dataset(ds, txg, flags, func, arg);
651 dsl_dataset_rele(ds, FTAG);
652 if (err != 0)
653 break;
654 }
655 }
656 if (err == ESRCH)
657 err = 0;
658 return (err);
659 }
660
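/*
 * Usage sketch (illustrative only; count_block_cb() and
 * count_dataset_blocks() below are hypothetical helpers, not part of this
 * file): a minimal blkptr_cb_t that counts the non-hole blocks reachable
 * from a dataset.  Consumers pass such a callback, plus TRAVERSE_* flags
 * from dmu_traverse.h, to traverse_dataset() or traverse_pool().
 */
#if 0
static int
count_block_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *countp = arg;

	if (bp == NULL || BP_IS_HOLE(bp))
		return (0);
	(*countp)++;
	return (0);
}

static int
count_dataset_blocks(dsl_dataset_t *ds, uint64_t *countp)
{
	*countp = 0;
	/* Visit parents before children and prefetch metadata as we go. */
	return (traverse_dataset(ds, 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, count_block_cb, countp));
}
#endif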
661 #if defined(_KERNEL) && defined(HAVE_SPL)
662 EXPORT_SYMBOL(traverse_dataset);
663 EXPORT_SYMBOL(traverse_pool);
664
665 module_param(zfs_pd_blks_max, int, 0644);
666 MODULE_PARM_DESC(zfs_pd_blks_max, "Max number of blocks to prefetch");
667 #endif