/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
int32_t send_holes_without_birth_time = 1;

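/*
 * Bookkeeping shared between the main traversal thread and the optional
 * prefetch thread.  The prefetcher charges each block it issues against
 * pd_bytes_fetched and stalls once it is more than zfs_pd_bytes_max ahead;
 * the traversal thread credits that counter back as it consumes blocks, so
 * the two stay loosely in step.
 */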
typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

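/*
 * Callbacks handed to zil_parse() by traverse_zil() below:
 * traverse_zil_block() is invoked once per intent-log block and
 * traverse_zil_record() once per log record.  Only TX_WRITE records carry a
 * block pointer of interest; both callbacks consult claim_txg to restrict
 * the visit to blocks that have actually been claimed (see traverse_zil()).
 */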
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus blocks that are already stable in read-only mode.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

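/*
 * Issue a speculative ARC read for a metadata block (an indirect block or a
 * block of dnodes) so that it is likely to be cached by the time the
 * traversal descends into it.  Holes, blocks at or below td_min_txg, and
 * level-0 non-dnode blocks are not prefetched here; data prefetch is handled
 * separately by the prefetch thread when TRAVERSE_PREFETCH_DATA is set.
 */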
static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}

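/*
 * Core of the traversal: visit one block pointer, invoking td_func before
 * and/or after (TRAVERSE_PRE / TRAVERSE_POST) and recursing into indirect
 * blocks, dnode blocks and objset blocks.  On error the current bookmark is
 * recorded in td_resume (if the caller supplied one) so that a later
 * traversal can pick up where this one stopped.
 */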
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) can not necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

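	/*
	 * Flow control against the prefetch thread: wait until it has
	 * fetched at least this block's worth of data, then consume that
	 * credit and wake it so it can issue more reads.
	 */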
	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		ASSERT(!BP_IS_PROTECTED(bp));

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td,
			    &((blkptr_t *)buf->b_data)[i], czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		/*
		 * dnode blocks might have their bonus buffers encrypted, so
		 * we must be careful to honor TRAVERSE_NO_DECRYPT
		 */
		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		child_dnp = buf->b_data;

		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				prefetch_dnode_metadata(td,
				    &osp->os_projectused_dnode,
				    zb->zb_objset, DMU_PROJECTUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				err = traverse_dnode(td,
				    &osp->os_projectused_dnode, zb->zb_objset,
				    DMU_PROJECTUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td,
				    &osp->os_groupused_dnode, zb->zb_objset,
				    DMU_GROUPUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td,
				    &osp->os_userused_dnode, zb->zb_objset,
				    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

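/*
 * Prefetch the metadata reachable from a single dnode: each of its top-level
 * block pointers and, if present, its spill block.
 */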
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, DN_SPILL_BLKPTR(dnp), &czb);
	}
}

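/*
 * Visit a single dnode: optionally issue dnode-level PRE/POST callbacks
 * (bookmarked with ZB_DNODE_LEVEL/ZB_DNODE_BLKID), then walk each of its
 * block pointers and its spill block through traverse_visitbp().
 */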
static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

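/*
 * Callback used by the prefetch thread: throttles itself against
 * zfs_pd_bytes_max (see the prefetch_data comment above) and issues
 * speculative ARC reads for every block the main traversal will soon want.
 */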
/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (bp == NULL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    zio_flags, &aflags, zb);

	return (0);
}

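/*
 * Body of the prefetch taskq thread dispatched from traverse_impl(): it runs
 * a second copy of the traversal with traverse_prefetcher() as the callback
 * and no prefetch_data of its own, then signals pd_exited so the main thread
 * knows it has finished.
 */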
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
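/*
 * Common implementation behind traverse_dataset(), traverse_dataset_destroyed()
 * and traverse_pool(): set up the traverse_data, walk the ZIL of a live
 * (non-snapshot) dataset, optionally dispatch the data-prefetch thread, then
 * visit everything reachable from rootbp, waiting for the prefetcher to exit
 * before returning.
 */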
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
		    BP_IS_PROTECTED(rootbp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
		    &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
		if (err != 0) {
			/*
			 * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
			 * continue to visitbp so that td_func can be called
			 * in pre stage, and err will reset to zero.
			 */
			if (!(td->td_flags & TRAVERSE_HARD) ||
			    !(td->td_flags & TRAVERSE_PRE))
				return (err);
		} else {
			osp = buf->b_data;
			traverse_zil(td, &osp->os_zil_header);
			arc_buf_destroy(buf, &buf);
		}
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE) == TASKQID_INVALID)
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);

	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}

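/*
 * Illustrative usage sketch (not part of this file): a minimal pre-order
 * callback that counts non-hole level-0 blocks could look like the
 * following, assuming the blkptr_cb_t signature declared in dmu_traverse.h
 * and a hypothetical caller-owned counter passed through arg:
 *
 *	static int
 *	count_l0_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		if (bp != NULL && !BP_IS_HOLE(bp) && BP_GET_LEVEL(bp) == 0)
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) traverse_dataset(ds, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_DATA,
 *	    count_l0_blocks_cb, &count);
 */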
int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

module_param(zfs_pd_bytes_max, int, 0644);
MODULE_PARM_DESC(zfs_pd_bytes_max, "Max number of bytes to prefetch");

module_param_named(ignore_hole_birth, send_holes_without_birth_time, int, 0644);
MODULE_PARM_DESC(ignore_hole_birth, "Alias for send_holes_without_birth_time");

module_param_named(send_holes_without_birth_time,
    send_holes_without_birth_time, int, 0644);
MODULE_PARM_DESC(send_holes_without_birth_time,
	"Ignore hole_birth txg for zfs send");
#endif