/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2017 Joyent, Inc.
 */
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/range_tree.h>
#include <sys/zfs_vfsops.h>
/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/O's issued since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */
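
/*
 * Illustrative aside (not part of this file): the core idea of the
 * reordering queue described above, reduced to a tiny userland sketch.
 * Queued I/Os arrive in logical (traversal) order and are issued in
 * LBA order instead. All names below are hypothetical; the block is
 * guarded out so it never participates in a kernel build.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct queued_io {
	uint64_t	qio_offset;	/* byte offset on the vdev (LBA-like) */
	uint64_t	qio_size;	/* length of the I/O */
} queued_io_t;

static int
qio_offset_compare(const void *a, const void *b)
{
	const queued_io_t *qa = a, *qb = b;

	if (qa->qio_offset < qb->qio_offset)
		return (-1);
	return (qa->qio_offset > qb->qio_offset);
}

int
main(void)
{
	/* I/Os discovered by metadata traversal, in logical order. */
	queued_io_t q[] = {
		{ 900ULL << 10, 128ULL << 10 },
		{ 100ULL << 10, 128ULL << 10 },
		{ 500ULL << 10, 128ULL << 10 },
	};
	size_t n = sizeof (q) / sizeof (q[0]);

	/* Re-sort by on-disk offset before issue, as the queues above do. */
	qsort(q, n, sizeof (q[0]), qio_offset_compare);
	for (size_t i = 0; i < n; i++) {
		printf("issue offset=%llu size=%llu\n",
		    (unsigned long long)q[i].qio_offset,
		    (unsigned long long)q[i].qio_size);
	}
	return (0);
}
#endif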
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_leaves(vdev_t *vd);

extern int zfs_vdev_async_write_active_min_dirty_percent;
/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
int zfs_scan_strict_mem_lim = B_FALSE;
/*
 * Maximum number of concurrently executed bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues to cause long latency,
 * which would cause long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
unsigned long zfs_scan_vdev_limit = 4 << 20;

int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
unsigned long zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */
/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */
int zfs_scrub_min_time_ms = 1000;	/* min millisecs to scrub per txg */
int zfs_obsolete_min_time_ms = 500;	/* min millisecs to obsolete per txg */
int zfs_free_min_time_ms = 1000;	/* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000;	/* min millisecs to resilver per txg */
int zfs_scan_checkpoint_intval = 7200;	/* in seconds */
int zfs_no_scrub_io = B_FALSE;		/* set to disable scrub i/o */
int zfs_no_scrub_prefetch = B_FALSE;	/* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
unsigned long zfs_async_block_max_blocks = 100000;
/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
 * Enable/disable the processing of the free_bpobj object.
 */
int zfs_free_bpobj_enabled = 1;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	zio_cksum_t		sio_cksum;

	/* fields from zio_t */
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	avl_node_t		sio_addr_node;	/* link into issuing queue */
	list_node_t		sio_list_node;	/* link for issuing to disk */
} scan_io_t;
struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn;	/* associated dsl_scan_t */
	vdev_t		*q_vd;	/* top-level vdev that this queue represents */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	avl_tree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv;	/* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache;
void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	sio_cache = kmem_cache_create("sio_cache",
	    sizeof (scan_io_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
scan_fini(void)
{
	kmem_cache_destroy(sio_cache);
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp, uint64_t vdev_id)
{
	bzero(bp, sizeof (*bp));
	DVA_SET_ASIZE(&bp->blk_dva[0], sio->sio_asize);
	DVA_SET_VDEV(&bp->blk_dva[0], vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], sio->sio_offset);
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	/* we discard the vdev id, since we can deduce it from the queue */
	sio->sio_offset = DVA_GET_OFFSET(&bp->blk_dva[dva_i]);
	sio->sio_asize = DVA_GET_ASIZE(&bp->blk_dva[dva_i]);
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
}
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB). Limits for the issuing
	 * phase are done per top-level vdev and are handled separately.
	 */
	scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
	    dsl_scan_count_leaves(spa->spa_root_vdev), 1ULL << 20);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);

		/*
		 * Detect if the pool contains the signature of #2094.  If it
		 * does properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				bcopy(zaptmp, &scn->scn_phys,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    (longlong_t)scn->scn_restart_txg);
		}
	}

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);

		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
	if (scn->scn_bytes_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint");

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}
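
/*
 * Illustrative aside (not part of this file): the decision table that the
 * comment above dsl_scan_sync_state() describes, as a standalone sketch.
 * The enum and helper below are hypothetical stand-ins, not the real
 * state_sync_type_t; the block is guarded out of any build.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

typedef enum { EX_SYNC_OPTIONAL, EX_SYNC_MANDATORY, EX_SYNC_CACHED } ex_sync_t;

/*
 * Returns true if the live progress record should be written for this sync
 * type; false means either nothing or only the cached copy gets written.
 */
static bool
ex_should_write_live(ex_sync_t type, uint64_t bytes_pending)
{
	switch (type) {
	case EX_SYNC_MANDATORY:
		assert(bytes_pending == 0);	/* caller must have drained */
		return (true);
	case EX_SYNC_OPTIONAL:
	case EX_SYNC_CACHED:
		/* real progress is only safe to persist with empty queues */
		return (bytes_pending == 0);
	}
	return (false);
}

int
main(void)
{
	assert(ex_should_write_live(EX_SYNC_OPTIONAL, 0) == true);
	assert(ex_should_write_live(EX_SYNC_OPTIONAL, 123) == false);
	assert(ex_should_write_live(EX_SYNC_CACHED, 456) == false);
	return (0);
}
#endif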
613 dsl_scan_setup_check(void *arg
, dmu_tx_t
*tx
)
615 dsl_scan_t
*scn
= dmu_tx_pool(tx
)->dp_scan
;
617 if (dsl_scan_is_running(scn
))
618 return (SET_ERROR(EBUSY
));
624 dsl_scan_setup_sync(void *arg
, dmu_tx_t
*tx
)
626 dsl_scan_t
*scn
= dmu_tx_pool(tx
)->dp_scan
;
627 pool_scan_func_t
*funcp
= arg
;
628 dmu_object_type_t ot
= 0;
629 dsl_pool_t
*dp
= scn
->scn_dp
;
630 spa_t
*spa
= dp
->dp_spa
;
632 ASSERT(!dsl_scan_is_running(scn
));
633 ASSERT(*funcp
> POOL_SCAN_NONE
&& *funcp
< POOL_SCAN_FUNCS
);
634 bzero(&scn
->scn_phys
, sizeof (scn
->scn_phys
));
635 scn
->scn_phys
.scn_func
= *funcp
;
636 scn
->scn_phys
.scn_state
= DSS_SCANNING
;
637 scn
->scn_phys
.scn_min_txg
= 0;
638 scn
->scn_phys
.scn_max_txg
= tx
->tx_txg
;
639 scn
->scn_phys
.scn_ddt_class_max
= DDT_CLASSES
- 1; /* the entire DDT */
640 scn
->scn_phys
.scn_start_time
= gethrestime_sec();
641 scn
->scn_phys
.scn_errors
= 0;
642 scn
->scn_phys
.scn_to_examine
= spa
->spa_root_vdev
->vdev_stat
.vs_alloc
;
643 scn
->scn_issued_before_pass
= 0;
644 scn
->scn_restart_txg
= 0;
645 scn
->scn_done_txg
= 0;
646 scn
->scn_last_checkpoint
= 0;
647 scn
->scn_checkpointing
= B_FALSE
;
648 spa_scan_stat_init(spa
);
650 if (DSL_SCAN_IS_SCRUB_RESILVER(scn
)) {
651 scn
->scn_phys
.scn_ddt_class_max
= zfs_scrub_ddt_class_max
;
653 /* rewrite all disk labels */
654 vdev_config_dirty(spa
->spa_root_vdev
);
656 if (vdev_resilver_needed(spa
->spa_root_vdev
,
657 &scn
->scn_phys
.scn_min_txg
, &scn
->scn_phys
.scn_max_txg
)) {
658 spa_event_notify(spa
, NULL
, NULL
,
659 ESC_ZFS_RESILVER_START
);
661 spa_event_notify(spa
, NULL
, NULL
, ESC_ZFS_SCRUB_START
);
664 spa
->spa_scrub_started
= B_TRUE
;
666 * If this is an incremental scrub, limit the DDT scrub phase
667 * to just the auto-ditto class (for correctness); the rest
668 * of the scrub should go faster using top-down pruning.
670 if (scn
->scn_phys
.scn_min_txg
> TXG_INITIAL
)
671 scn
->scn_phys
.scn_ddt_class_max
= DDT_CLASS_DITTO
;
675 /* back to the generic stuff */
677 if (dp
->dp_blkstats
== NULL
) {
679 vmem_alloc(sizeof (zfs_all_blkstats_t
), KM_SLEEP
);
680 mutex_init(&dp
->dp_blkstats
->zab_lock
, NULL
,
681 MUTEX_DEFAULT
, NULL
);
683 bzero(&dp
->dp_blkstats
->zab_type
, sizeof (dp
->dp_blkstats
->zab_type
));
685 if (spa_version(spa
) < SPA_VERSION_DSL_SCRUB
)
686 ot
= DMU_OT_ZAP_OTHER
;
688 scn
->scn_phys
.scn_queue_obj
= zap_create(dp
->dp_meta_objset
,
689 ot
? ot
: DMU_OT_SCAN_QUEUE
, DMU_OT_NONE
, 0, tx
);
691 bcopy(&scn
->scn_phys
, &scn
->scn_phys_cached
, sizeof (scn
->scn_phys
));
693 dsl_scan_sync_state(scn
, tx
, SYNC_MANDATORY
);
695 spa_history_log_internal(spa
, "scan setup", tx
,
696 "func=%u mintxg=%llu maxtxg=%llu",
697 *funcp
, scn
->scn_phys
.scn_min_txg
, scn
->scn_phys
.scn_max_txg
);
/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
705 dsl_scan(dsl_pool_t
*dp
, pool_scan_func_t func
)
707 spa_t
*spa
= dp
->dp_spa
;
708 dsl_scan_t
*scn
= dp
->dp_scan
;
711 * Purge all vdev caches and probe all devices. We do this here
712 * rather than in sync context because this requires a writer lock
713 * on the spa_config lock, which we can't do from sync context. The
714 * spa_scrub_reopen flag indicates that vdev_open() should not
715 * attempt to start another scrub.
717 spa_vdev_state_enter(spa
, SCL_NONE
);
718 spa
->spa_scrub_reopen
= B_TRUE
;
719 vdev_reopen(spa
->spa_root_vdev
);
720 spa
->spa_scrub_reopen
= B_FALSE
;
721 (void) spa_vdev_state_exit(spa
, NULL
, 0);
723 if (func
== POOL_SCAN_SCRUB
&& dsl_scan_is_paused_scrub(scn
)) {
724 /* got scrub start cmd, resume paused scrub */
725 int err
= dsl_scrub_set_pause_resume(scn
->scn_dp
,
728 spa_event_notify(spa
, NULL
, NULL
, ESC_ZFS_SCRUB_RESUME
);
732 return (SET_ERROR(err
));
735 return (dsl_sync_task(spa_name(spa
), dsl_scan_setup_check
,
736 dsl_scan_setup_sync
, &func
, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED
));
741 dsl_scan_done(dsl_scan_t
*scn
, boolean_t complete
, dmu_tx_t
*tx
)
743 static const char *old_names
[] = {
745 "scrub_ddt_bookmark",
746 "scrub_ddt_class_max",
755 dsl_pool_t
*dp
= scn
->scn_dp
;
756 spa_t
*spa
= dp
->dp_spa
;
759 /* Remove any remnants of an old-style scrub. */
760 for (i
= 0; old_names
[i
]; i
++) {
761 (void) zap_remove(dp
->dp_meta_objset
,
762 DMU_POOL_DIRECTORY_OBJECT
, old_names
[i
], tx
);
765 if (scn
->scn_phys
.scn_queue_obj
!= 0) {
766 VERIFY0(dmu_object_free(dp
->dp_meta_objset
,
767 scn
->scn_phys
.scn_queue_obj
, tx
));
768 scn
->scn_phys
.scn_queue_obj
= 0;
770 scan_ds_queue_clear(scn
);
772 scn
->scn_phys
.scn_flags
&= ~DSF_SCRUB_PAUSED
;
775 * If we were "restarted" from a stopped state, don't bother
776 * with anything else.
778 if (!dsl_scan_is_running(scn
)) {
779 ASSERT(!scn
->scn_is_sorted
);
783 if (scn
->scn_is_sorted
) {
784 scan_io_queues_destroy(scn
);
785 scn
->scn_is_sorted
= B_FALSE
;
787 if (scn
->scn_taskq
!= NULL
) {
788 taskq_destroy(scn
->scn_taskq
);
789 scn
->scn_taskq
= NULL
;
793 scn
->scn_phys
.scn_state
= complete
? DSS_FINISHED
: DSS_CANCELED
;
795 if (dsl_scan_restarting(scn
, tx
))
796 spa_history_log_internal(spa
, "scan aborted, restarting", tx
,
797 "errors=%llu", spa_get_errlog_size(spa
));
799 spa_history_log_internal(spa
, "scan cancelled", tx
,
800 "errors=%llu", spa_get_errlog_size(spa
));
802 spa_history_log_internal(spa
, "scan done", tx
,
803 "errors=%llu", spa_get_errlog_size(spa
));
805 if (DSL_SCAN_IS_SCRUB_RESILVER(scn
)) {
806 spa
->spa_scrub_started
= B_FALSE
;
807 spa
->spa_scrub_active
= B_FALSE
;
810 * If the scrub/resilver completed, update all DTLs to
811 * reflect this. Whether it succeeded or not, vacate
812 * all temporary scrub DTLs.
814 * As the scrub does not currently support traversing
815 * data that have been freed but are part of a checkpoint,
816 * we don't mark the scrub as done in the DTLs as faults
817 * may still exist in those vdevs.
820 !spa_feature_is_active(spa
, SPA_FEATURE_POOL_CHECKPOINT
)) {
821 vdev_dtl_reassess(spa
->spa_root_vdev
, tx
->tx_txg
,
822 scn
->scn_phys
.scn_max_txg
, B_TRUE
);
824 spa_event_notify(spa
, NULL
, NULL
,
825 scn
->scn_phys
.scn_min_txg
?
826 ESC_ZFS_RESILVER_FINISH
: ESC_ZFS_SCRUB_FINISH
);
828 vdev_dtl_reassess(spa
->spa_root_vdev
, tx
->tx_txg
,
831 spa_errlog_rotate(spa
);
834 * We may have finished replacing a device.
835 * Let the async thread assess this and handle the detach.
837 spa_async_request(spa
, SPA_ASYNC_RESILVER_DONE
);
840 scn
->scn_phys
.scn_end_time
= gethrestime_sec();
842 if (spa
->spa_errata
== ZPOOL_ERRATA_ZOL_2094_SCRUB
)
845 ASSERT(!dsl_scan_is_running(scn
));
850 dsl_scan_cancel_check(void *arg
, dmu_tx_t
*tx
)
852 dsl_scan_t
*scn
= dmu_tx_pool(tx
)->dp_scan
;
854 if (!dsl_scan_is_running(scn
))
855 return (SET_ERROR(ENOENT
));
861 dsl_scan_cancel_sync(void *arg
, dmu_tx_t
*tx
)
863 dsl_scan_t
*scn
= dmu_tx_pool(tx
)->dp_scan
;
865 dsl_scan_done(scn
, B_FALSE
, tx
);
866 dsl_scan_sync_state(scn
, tx
, SYNC_MANDATORY
);
867 spa_event_notify(scn
->scn_dp
->dp_spa
, NULL
, NULL
, ESC_ZFS_SCRUB_ABORT
);
871 dsl_scan_cancel(dsl_pool_t
*dp
)
873 return (dsl_sync_task(spa_name(dp
->dp_spa
), dsl_scan_cancel_check
,
874 dsl_scan_cancel_sync
, NULL
, 3, ZFS_SPACE_CHECK_RESERVED
));
878 dsl_scrub_pause_resume_check(void *arg
, dmu_tx_t
*tx
)
880 pool_scrub_cmd_t
*cmd
= arg
;
881 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
882 dsl_scan_t
*scn
= dp
->dp_scan
;
884 if (*cmd
== POOL_SCRUB_PAUSE
) {
885 /* can't pause a scrub when there is no in-progress scrub */
886 if (!dsl_scan_scrubbing(dp
))
887 return (SET_ERROR(ENOENT
));
889 /* can't pause a paused scrub */
890 if (dsl_scan_is_paused_scrub(scn
))
891 return (SET_ERROR(EBUSY
));
892 } else if (*cmd
!= POOL_SCRUB_NORMAL
) {
893 return (SET_ERROR(ENOTSUP
));
900 dsl_scrub_pause_resume_sync(void *arg
, dmu_tx_t
*tx
)
902 pool_scrub_cmd_t
*cmd
= arg
;
903 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
904 spa_t
*spa
= dp
->dp_spa
;
905 dsl_scan_t
*scn
= dp
->dp_scan
;
907 if (*cmd
== POOL_SCRUB_PAUSE
) {
908 /* can't pause a scrub when there is no in-progress scrub */
909 spa
->spa_scan_pass_scrub_pause
= gethrestime_sec();
910 scn
->scn_phys
.scn_flags
|= DSF_SCRUB_PAUSED
;
911 dsl_scan_sync_state(scn
, tx
, SYNC_CACHED
);
912 spa_event_notify(spa
, NULL
, NULL
, ESC_ZFS_SCRUB_PAUSED
);
914 ASSERT3U(*cmd
, ==, POOL_SCRUB_NORMAL
);
915 if (dsl_scan_is_paused_scrub(scn
)) {
917 * We need to keep track of how much time we spend
918 * paused per pass so that we can adjust the scrub rate
919 * shown in the output of 'zpool status'
921 spa
->spa_scan_pass_scrub_spent_paused
+=
922 gethrestime_sec() - spa
->spa_scan_pass_scrub_pause
;
923 spa
->spa_scan_pass_scrub_pause
= 0;
924 scn
->scn_phys
.scn_flags
&= ~DSF_SCRUB_PAUSED
;
925 dsl_scan_sync_state(scn
, tx
, SYNC_CACHED
);
/*
 * Set scrub pause/resume state if it makes sense to do so
 */
934 dsl_scrub_set_pause_resume(const dsl_pool_t
*dp
, pool_scrub_cmd_t cmd
)
936 return (dsl_sync_task(spa_name(dp
->dp_spa
),
937 dsl_scrub_pause_resume_check
, dsl_scrub_pause_resume_sync
, &cmd
, 3,
938 ZFS_SPACE_CHECK_RESERVED
));
/* start a new scan, or restart an existing one. */
944 dsl_resilver_restart(dsl_pool_t
*dp
, uint64_t txg
)
948 tx
= dmu_tx_create_dd(dp
->dp_mos_dir
);
949 VERIFY(0 == dmu_tx_assign(tx
, TXG_WAIT
));
951 txg
= dmu_tx_get_txg(tx
);
952 dp
->dp_scan
->scn_restart_txg
= txg
;
955 dp
->dp_scan
->scn_restart_txg
= txg
;
957 zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t
)txg
);
961 dsl_free(dsl_pool_t
*dp
, uint64_t txg
, const blkptr_t
*bp
)
963 zio_free(dp
->dp_spa
, txg
, bp
);
967 dsl_free_sync(zio_t
*pio
, dsl_pool_t
*dp
, uint64_t txg
, const blkptr_t
*bpp
)
969 ASSERT(dsl_pool_sync_context(dp
));
970 zio_nowait(zio_free_sync(pio
, dp
->dp_spa
, txg
, bpp
, pio
->io_flags
));
974 scan_ds_queue_compare(const void *a
, const void *b
)
976 const scan_ds_t
*sds_a
= a
, *sds_b
= b
;
978 if (sds_a
->sds_dsobj
< sds_b
->sds_dsobj
)
980 if (sds_a
->sds_dsobj
== sds_b
->sds_dsobj
)
986 scan_ds_queue_clear(dsl_scan_t
*scn
)
990 while ((sds
= avl_destroy_nodes(&scn
->scn_queue
, &cookie
)) != NULL
) {
991 kmem_free(sds
, sizeof (*sds
));
996 scan_ds_queue_contains(dsl_scan_t
*scn
, uint64_t dsobj
, uint64_t *txg
)
998 scan_ds_t srch
, *sds
;
1000 srch
.sds_dsobj
= dsobj
;
1001 sds
= avl_find(&scn
->scn_queue
, &srch
, NULL
);
1002 if (sds
!= NULL
&& txg
!= NULL
)
1003 *txg
= sds
->sds_txg
;
1004 return (sds
!= NULL
);
1008 scan_ds_queue_insert(dsl_scan_t
*scn
, uint64_t dsobj
, uint64_t txg
)
1013 sds
= kmem_zalloc(sizeof (*sds
), KM_SLEEP
);
1014 sds
->sds_dsobj
= dsobj
;
1017 VERIFY3P(avl_find(&scn
->scn_queue
, sds
, &where
), ==, NULL
);
1018 avl_insert(&scn
->scn_queue
, sds
, where
);
1022 scan_ds_queue_remove(dsl_scan_t
*scn
, uint64_t dsobj
)
1024 scan_ds_t srch
, *sds
;
1026 srch
.sds_dsobj
= dsobj
;
1028 sds
= avl_find(&scn
->scn_queue
, &srch
, NULL
);
1029 VERIFY(sds
!= NULL
);
1030 avl_remove(&scn
->scn_queue
, sds
);
1031 kmem_free(sds
, sizeof (*sds
));
1035 scan_ds_queue_sync(dsl_scan_t
*scn
, dmu_tx_t
*tx
)
1037 dsl_pool_t
*dp
= scn
->scn_dp
;
1038 spa_t
*spa
= dp
->dp_spa
;
1039 dmu_object_type_t ot
= (spa_version(spa
) >= SPA_VERSION_DSL_SCRUB
) ?
1040 DMU_OT_SCAN_QUEUE
: DMU_OT_ZAP_OTHER
;
1042 ASSERT0(scn
->scn_bytes_pending
);
1043 ASSERT(scn
->scn_phys
.scn_queue_obj
!= 0);
1045 VERIFY0(dmu_object_free(dp
->dp_meta_objset
,
1046 scn
->scn_phys
.scn_queue_obj
, tx
));
1047 scn
->scn_phys
.scn_queue_obj
= zap_create(dp
->dp_meta_objset
, ot
,
1048 DMU_OT_NONE
, 0, tx
);
1049 for (scan_ds_t
*sds
= avl_first(&scn
->scn_queue
);
1050 sds
!= NULL
; sds
= AVL_NEXT(&scn
->scn_queue
, sds
)) {
1051 VERIFY0(zap_add_int_key(dp
->dp_meta_objset
,
1052 scn
->scn_phys
.scn_queue_obj
, sds
->sds_dsobj
,
/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
 */
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t mlim_hard, mlim_soft, mused;
	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
	    scn->scn_dp->dp_spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);

	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/* #extents in exts_by_size = # in exts_by_addr */
			mused += avl_numnodes(&queue->q_exts_by_size) *
			    sizeof (range_seg_t) +
			    avl_numnodes(&queue->q_sios_by_addr) *
			    sizeof (scan_io_t);
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_bytes_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}
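
/*
 * Illustrative aside (not part of this file): the limit arithmetic above,
 * worked for one hypothetical configuration. With 16 GiB of physical memory
 * and zfs_scan_mem_lim_fact = 20, the hard limit is 16 GiB / 20 (about
 * 819 MiB, clamped below by zfs_scan_mem_lim_min and above by 5% of pool
 * allocation), and the soft limit sits min(hard / 20, 128 MiB) below it.
 * All values below are hypothetical; the block is guarded out of any build.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	EX_MIN(a, b)	((a) < (b) ? (a) : (b))
#define	EX_MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	uint64_t physmem_bytes = 16ULL << 30;	/* hypothetical: 16 GiB RAM */
	uint64_t alloc = 2ULL << 40;		/* hypothetical: 2 TiB allocated */
	uint64_t lim_min = 16ULL << 20;		/* zfs_scan_mem_lim_min */
	uint64_t soft_max = 128ULL << 20;	/* zfs_scan_mem_lim_soft_max */
	uint64_t fact = 20, soft_fact = 20;

	/* hard limit: fraction of memory, clamped to [lim_min, alloc/20] */
	uint64_t mlim_hard = EX_MAX(physmem_bytes / fact, lim_min);
	mlim_hard = EX_MIN(mlim_hard, alloc / 20);

	/* soft limit: a capped fraction below the hard limit */
	uint64_t mlim_soft = mlim_hard -
	    EX_MIN(mlim_hard / soft_fact, soft_max);

	printf("hard limit: %llu MiB, soft limit: %llu MiB\n",
	    (unsigned long long)(mlim_hard >> 20),
	    (unsigned long long)(mlim_soft >> 20));
	return (0);
}
#endif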
1138 dsl_scan_check_suspend(dsl_scan_t
*scn
, const zbookmark_phys_t
*zb
)
1140 /* we never skip user/group accounting objects */
1141 if (zb
&& (int64_t)zb
->zb_object
< 0)
1144 if (scn
->scn_suspending
)
1145 return (B_TRUE
); /* we're already suspending */
1147 if (!ZB_IS_ZERO(&scn
->scn_phys
.scn_bookmark
))
1148 return (B_FALSE
); /* we're resuming */
1150 /* We only know how to resume from level-0 blocks. */
1151 if (zb
&& zb
->zb_level
!= 0)
	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
1168 uint64_t curr_time_ns
= gethrtime();
1169 uint64_t scan_time_ns
= curr_time_ns
- scn
->scn_sync_start_time
;
1170 uint64_t sync_time_ns
= curr_time_ns
-
1171 scn
->scn_dp
->dp_spa
->spa_sync_starttime
;
1172 int dirty_pct
= scn
->scn_dp
->dp_dirty_total
* 100 / zfs_dirty_data_max
;
1173 int mintime
= (scn
->scn_phys
.scn_func
== POOL_SCAN_RESILVER
) ?
1174 zfs_resilver_min_time_ms
: zfs_scrub_min_time_ms
;
1176 if ((NSEC2MSEC(scan_time_ns
) > mintime
&&
1177 (dirty_pct
>= zfs_vdev_async_write_active_min_dirty_percent
||
1178 txg_sync_waiting(scn
->scn_dp
) ||
1179 NSEC2SEC(sync_time_ns
) >= zfs_txg_timeout
)) ||
1180 spa_shutting_down(scn
->scn_dp
->dp_spa
) ||
1181 (zfs_scan_strict_mem_lim
&& dsl_scan_should_clear(scn
))) {
1183 dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
1184 (longlong_t
)zb
->zb_objset
,
1185 (longlong_t
)zb
->zb_object
,
1186 (longlong_t
)zb
->zb_level
,
1187 (longlong_t
)zb
->zb_blkid
);
1188 scn
->scn_phys
.scn_bookmark
= *zb
;
1191 dsl_scan_phys_t
*scnp
= &scn
->scn_phys
;
1192 dprintf("suspending at at DDT bookmark "
1193 "%llx/%llx/%llx/%llx\n",
1194 (longlong_t
)scnp
->scn_ddt_bookmark
.ddb_class
,
1195 (longlong_t
)scnp
->scn_ddt_bookmark
.ddb_type
,
1196 (longlong_t
)scnp
->scn_ddt_bookmark
.ddb_checksum
,
1197 (longlong_t
)scnp
->scn_ddt_bookmark
.ddb_cursor
);
1200 scn
->scn_suspending
= B_TRUE
;
1206 typedef struct zil_scan_arg
{
1208 zil_header_t
*zsa_zh
;
1213 dsl_scan_zil_block(zilog_t
*zilog
, blkptr_t
*bp
, void *arg
, uint64_t claim_txg
)
1215 zil_scan_arg_t
*zsa
= arg
;
1216 dsl_pool_t
*dp
= zsa
->zsa_dp
;
1217 dsl_scan_t
*scn
= dp
->dp_scan
;
1218 zil_header_t
*zh
= zsa
->zsa_zh
;
1219 zbookmark_phys_t zb
;
1221 if (BP_IS_HOLE(bp
) || bp
->blk_birth
<= scn
->scn_phys
.scn_cur_min_txg
)
1225 * One block ("stubby") can be allocated a long time ago; we
1226 * want to visit that one because it has been allocated
1227 * (on-disk) even if it hasn't been claimed (even though for
1228 * scrub there's nothing to do to it).
1230 if (claim_txg
== 0 && bp
->blk_birth
>= spa_min_claim_txg(dp
->dp_spa
))
1233 SET_BOOKMARK(&zb
, zh
->zh_log
.blk_cksum
.zc_word
[ZIL_ZC_OBJSET
],
1234 ZB_ZIL_OBJECT
, ZB_ZIL_LEVEL
, bp
->blk_cksum
.zc_word
[ZIL_ZC_SEQ
]);
1236 VERIFY(0 == scan_funcs
[scn
->scn_phys
.scn_func
](dp
, bp
, &zb
));
1242 dsl_scan_zil_record(zilog_t
*zilog
, lr_t
*lrc
, void *arg
, uint64_t claim_txg
)
1244 if (lrc
->lrc_txtype
== TX_WRITE
) {
1245 zil_scan_arg_t
*zsa
= arg
;
1246 dsl_pool_t
*dp
= zsa
->zsa_dp
;
1247 dsl_scan_t
*scn
= dp
->dp_scan
;
1248 zil_header_t
*zh
= zsa
->zsa_zh
;
1249 lr_write_t
*lr
= (lr_write_t
*)lrc
;
1250 blkptr_t
*bp
= &lr
->lr_blkptr
;
1251 zbookmark_phys_t zb
;
1253 if (BP_IS_HOLE(bp
) ||
1254 bp
->blk_birth
<= scn
->scn_phys
.scn_cur_min_txg
)
1258 * birth can be < claim_txg if this record's txg is
1259 * already txg sync'ed (but this log block contains
1260 * other records that are not synced)
1262 if (claim_txg
== 0 || bp
->blk_birth
< claim_txg
)
1265 SET_BOOKMARK(&zb
, zh
->zh_log
.blk_cksum
.zc_word
[ZIL_ZC_OBJSET
],
1266 lr
->lr_foid
, ZB_ZIL_LEVEL
,
1267 lr
->lr_offset
/ BP_GET_LSIZE(bp
));
1269 VERIFY(0 == scan_funcs
[scn
->scn_phys
.scn_func
](dp
, bp
, &zb
));
1275 dsl_scan_zil(dsl_pool_t
*dp
, zil_header_t
*zh
)
1277 uint64_t claim_txg
= zh
->zh_claim_txg
;
1278 zil_scan_arg_t zsa
= { dp
, zh
};
1281 ASSERT(spa_writeable(dp
->dp_spa
));
1284 * We only want to visit blocks that have been claimed but not yet
1285 * replayed (or, in read-only mode, blocks that *would* be claimed).
1290 zilog
= zil_alloc(dp
->dp_meta_objset
, zh
);
1292 (void) zil_parse(zilog
, dsl_scan_zil_block
, dsl_scan_zil_record
, &zsa
,
1293 claim_txg
, B_FALSE
);
/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
1303 scan_prefetch_queue_compare(const void *a
, const void *b
)
1305 const scan_prefetch_issue_ctx_t
*spic_a
= a
, *spic_b
= b
;
1306 const scan_prefetch_ctx_t
*spc_a
= spic_a
->spic_spc
;
1307 const scan_prefetch_ctx_t
*spc_b
= spic_b
->spic_spc
;
1309 return (zbookmark_compare(spc_a
->spc_datablkszsec
,
1310 spc_a
->spc_indblkshift
, spc_b
->spc_datablkszsec
,
1311 spc_b
->spc_indblkshift
, &spic_a
->spic_zb
, &spic_b
->spic_zb
));
1315 scan_prefetch_ctx_rele(scan_prefetch_ctx_t
*spc
, void *tag
)
1317 if (zfs_refcount_remove(&spc
->spc_refcnt
, tag
) == 0) {
1318 zfs_refcount_destroy(&spc
->spc_refcnt
);
1319 kmem_free(spc
, sizeof (scan_prefetch_ctx_t
));
1323 static scan_prefetch_ctx_t
*
1324 scan_prefetch_ctx_create(dsl_scan_t
*scn
, dnode_phys_t
*dnp
, void *tag
)
1326 scan_prefetch_ctx_t
*spc
;
1328 spc
= kmem_alloc(sizeof (scan_prefetch_ctx_t
), KM_SLEEP
);
1329 zfs_refcount_create(&spc
->spc_refcnt
);
1330 zfs_refcount_add(&spc
->spc_refcnt
, tag
);
1333 spc
->spc_datablkszsec
= dnp
->dn_datablkszsec
;
1334 spc
->spc_indblkshift
= dnp
->dn_indblkshift
;
1335 spc
->spc_root
= B_FALSE
;
1337 spc
->spc_datablkszsec
= 0;
1338 spc
->spc_indblkshift
= 0;
1339 spc
->spc_root
= B_TRUE
;
1346 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t
*spc
, void *tag
)
1348 zfs_refcount_add(&spc
->spc_refcnt
, tag
);
1352 dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t
*spc
,
1353 const zbookmark_phys_t
*zb
)
1355 zbookmark_phys_t
*last_zb
= &spc
->spc_scn
->scn_prefetch_bookmark
;
1356 dnode_phys_t tmp_dnp
;
1357 dnode_phys_t
*dnp
= (spc
->spc_root
) ? NULL
: &tmp_dnp
;
1359 if (zb
->zb_objset
!= last_zb
->zb_objset
)
1361 if ((int64_t)zb
->zb_object
< 0)
1364 tmp_dnp
.dn_datablkszsec
= spc
->spc_datablkszsec
;
1365 tmp_dnp
.dn_indblkshift
= spc
->spc_indblkshift
;
1367 if (zbookmark_subtree_completed(dnp
, zb
, last_zb
))
1374 dsl_scan_prefetch(scan_prefetch_ctx_t
*spc
, blkptr_t
*bp
, zbookmark_phys_t
*zb
)
1377 dsl_scan_t
*scn
= spc
->spc_scn
;
1378 spa_t
*spa
= scn
->scn_dp
->dp_spa
;
1379 scan_prefetch_issue_ctx_t
*spic
;
1381 if (zfs_no_scrub_prefetch
)
1384 if (BP_IS_HOLE(bp
) || bp
->blk_birth
<= scn
->scn_phys
.scn_cur_min_txg
||
1385 (BP_GET_LEVEL(bp
) == 0 && BP_GET_TYPE(bp
) != DMU_OT_DNODE
&&
1386 BP_GET_TYPE(bp
) != DMU_OT_OBJSET
))
1389 if (dsl_scan_check_prefetch_resume(spc
, zb
))
1392 scan_prefetch_ctx_add_ref(spc
, scn
);
1393 spic
= kmem_alloc(sizeof (scan_prefetch_issue_ctx_t
), KM_SLEEP
);
1394 spic
->spic_spc
= spc
;
1395 spic
->spic_bp
= *bp
;
1396 spic
->spic_zb
= *zb
;
1399 * Add the IO to the queue of blocks to prefetch. This allows us to
1400 * prioritize blocks that we will need first for the main traversal
1403 mutex_enter(&spa
->spa_scrub_lock
);
1404 if (avl_find(&scn
->scn_prefetch_queue
, spic
, &idx
) != NULL
) {
1405 /* this block is already queued for prefetch */
1406 kmem_free(spic
, sizeof (scan_prefetch_issue_ctx_t
));
1407 scan_prefetch_ctx_rele(spc
, scn
);
1408 mutex_exit(&spa
->spa_scrub_lock
);
1412 avl_insert(&scn
->scn_prefetch_queue
, spic
, idx
);
1413 cv_broadcast(&spa
->spa_scrub_io_cv
);
1414 mutex_exit(&spa
->spa_scrub_lock
);
1418 dsl_scan_prefetch_dnode(dsl_scan_t
*scn
, dnode_phys_t
*dnp
,
1419 uint64_t objset
, uint64_t object
)
1422 zbookmark_phys_t zb
;
1423 scan_prefetch_ctx_t
*spc
;
1425 if (dnp
->dn_nblkptr
== 0 && !(dnp
->dn_flags
& DNODE_FLAG_SPILL_BLKPTR
))
1428 SET_BOOKMARK(&zb
, objset
, object
, 0, 0);
1430 spc
= scan_prefetch_ctx_create(scn
, dnp
, FTAG
);
1432 for (i
= 0; i
< dnp
->dn_nblkptr
; i
++) {
1433 zb
.zb_level
= BP_GET_LEVEL(&dnp
->dn_blkptr
[i
]);
1435 dsl_scan_prefetch(spc
, &dnp
->dn_blkptr
[i
], &zb
);
1438 if (dnp
->dn_flags
& DNODE_FLAG_SPILL_BLKPTR
) {
1440 zb
.zb_blkid
= DMU_SPILL_BLKID
;
1441 dsl_scan_prefetch(spc
, DN_SPILL_BLKPTR(dnp
), &zb
);
1444 scan_prefetch_ctx_rele(spc
, FTAG
);
1448 dsl_scan_prefetch_cb(zio_t
*zio
, const zbookmark_phys_t
*zb
, const blkptr_t
*bp
,
1449 arc_buf_t
*buf
, void *private)
1451 scan_prefetch_ctx_t
*spc
= private;
1452 dsl_scan_t
*scn
= spc
->spc_scn
;
1453 spa_t
*spa
= scn
->scn_dp
->dp_spa
;
1455 /* broadcast that the IO has completed for rate limiting purposes */
1456 mutex_enter(&spa
->spa_scrub_lock
);
1457 ASSERT3U(spa
->spa_scrub_inflight
, >=, BP_GET_PSIZE(bp
));
1458 spa
->spa_scrub_inflight
-= BP_GET_PSIZE(bp
);
1459 cv_broadcast(&spa
->spa_scrub_io_cv
);
1460 mutex_exit(&spa
->spa_scrub_lock
);
1462 /* if there was an error or we are done prefetching, just cleanup */
1463 if (buf
== NULL
|| scn
->scn_prefetch_stop
)
1466 if (BP_GET_LEVEL(bp
) > 0) {
1469 int epb
= BP_GET_LSIZE(bp
) >> SPA_BLKPTRSHIFT
;
1470 zbookmark_phys_t czb
;
1472 for (i
= 0, cbp
= buf
->b_data
; i
< epb
; i
++, cbp
++) {
1473 SET_BOOKMARK(&czb
, zb
->zb_objset
, zb
->zb_object
,
1474 zb
->zb_level
- 1, zb
->zb_blkid
* epb
+ i
);
1475 dsl_scan_prefetch(spc
, cbp
, &czb
);
1477 } else if (BP_GET_TYPE(bp
) == DMU_OT_DNODE
) {
1480 int epb
= BP_GET_LSIZE(bp
) >> DNODE_SHIFT
;
1482 for (i
= 0, cdnp
= buf
->b_data
; i
< epb
;
1483 i
+= cdnp
->dn_extra_slots
+ 1,
1484 cdnp
+= cdnp
->dn_extra_slots
+ 1) {
1485 dsl_scan_prefetch_dnode(scn
, cdnp
,
1486 zb
->zb_objset
, zb
->zb_blkid
* epb
+ i
);
1488 } else if (BP_GET_TYPE(bp
) == DMU_OT_OBJSET
) {
1489 objset_phys_t
*osp
= buf
->b_data
;
1491 dsl_scan_prefetch_dnode(scn
, &osp
->os_meta_dnode
,
1492 zb
->zb_objset
, DMU_META_DNODE_OBJECT
);
1494 if (OBJSET_BUF_HAS_USERUSED(buf
)) {
1495 dsl_scan_prefetch_dnode(scn
,
1496 &osp
->os_groupused_dnode
, zb
->zb_objset
,
1497 DMU_GROUPUSED_OBJECT
);
1498 dsl_scan_prefetch_dnode(scn
,
1499 &osp
->os_userused_dnode
, zb
->zb_objset
,
1500 DMU_USERUSED_OBJECT
);
1506 arc_buf_destroy(buf
, private);
1507 scan_prefetch_ctx_rele(spc
, scn
);
1512 dsl_scan_prefetch_thread(void *arg
)
1514 dsl_scan_t
*scn
= arg
;
1515 spa_t
*spa
= scn
->scn_dp
->dp_spa
;
1516 scan_prefetch_issue_ctx_t
*spic
;
1518 /* loop until we are told to stop */
1519 while (!scn
->scn_prefetch_stop
) {
1520 arc_flags_t flags
= ARC_FLAG_NOWAIT
|
1521 ARC_FLAG_PRESCIENT_PREFETCH
| ARC_FLAG_PREFETCH
;
1522 int zio_flags
= ZIO_FLAG_CANFAIL
| ZIO_FLAG_SCAN_THREAD
;
1524 mutex_enter(&spa
->spa_scrub_lock
);
1527 * Wait until we have an IO to issue and are not above our
1528 * maximum in flight limit.
1530 while (!scn
->scn_prefetch_stop
&&
1531 (avl_numnodes(&scn
->scn_prefetch_queue
) == 0 ||
1532 spa
->spa_scrub_inflight
>= scn
->scn_maxinflight_bytes
)) {
1533 cv_wait(&spa
->spa_scrub_io_cv
, &spa
->spa_scrub_lock
);
1536 /* recheck if we should stop since we waited for the cv */
1537 if (scn
->scn_prefetch_stop
) {
1538 mutex_exit(&spa
->spa_scrub_lock
);
1542 /* remove the prefetch IO from the tree */
1543 spic
= avl_first(&scn
->scn_prefetch_queue
);
1544 spa
->spa_scrub_inflight
+= BP_GET_PSIZE(&spic
->spic_bp
);
1545 avl_remove(&scn
->scn_prefetch_queue
, spic
);
1547 mutex_exit(&spa
->spa_scrub_lock
);
1549 if (BP_IS_PROTECTED(&spic
->spic_bp
)) {
1550 ASSERT(BP_GET_TYPE(&spic
->spic_bp
) == DMU_OT_DNODE
||
1551 BP_GET_TYPE(&spic
->spic_bp
) == DMU_OT_OBJSET
);
1552 ASSERT3U(BP_GET_LEVEL(&spic
->spic_bp
), ==, 0);
1553 zio_flags
|= ZIO_FLAG_RAW
;
1556 /* issue the prefetch asynchronously */
1557 (void) arc_read(scn
->scn_zio_root
, scn
->scn_dp
->dp_spa
,
1558 &spic
->spic_bp
, dsl_scan_prefetch_cb
, spic
->spic_spc
,
1559 ZIO_PRIORITY_SCRUB
, zio_flags
, &flags
, &spic
->spic_zb
);
1561 kmem_free(spic
, sizeof (scan_prefetch_issue_ctx_t
));
1564 ASSERT(scn
->scn_prefetch_stop
);
1566 /* free any prefetches we didn't get to complete */
1567 mutex_enter(&spa
->spa_scrub_lock
);
1568 while ((spic
= avl_first(&scn
->scn_prefetch_queue
)) != NULL
) {
1569 avl_remove(&scn
->scn_prefetch_queue
, spic
);
1570 scan_prefetch_ctx_rele(spic
->spic_spc
, scn
);
1571 kmem_free(spic
, sizeof (scan_prefetch_issue_ctx_t
));
1573 ASSERT0(avl_numnodes(&scn
->scn_prefetch_queue
));
1574 mutex_exit(&spa
->spa_scrub_lock
);
1578 dsl_scan_check_resume(dsl_scan_t
*scn
, const dnode_phys_t
*dnp
,
1579 const zbookmark_phys_t
*zb
)
1582 * We never skip over user/group accounting objects (obj<0)
1584 if (!ZB_IS_ZERO(&scn
->scn_phys
.scn_bookmark
) &&
1585 (int64_t)zb
->zb_object
>= 0) {
1587 * If we already visited this bp & everything below (in
1588 * a prior txg sync), don't bother doing it again.
1590 if (zbookmark_subtree_completed(dnp
, zb
,
1591 &scn
->scn_phys
.scn_bookmark
))
1595 * If we found the block we're trying to resume from, or
1596 * we went past it to a different object, zero it out to
1597 * indicate that it's OK to start checking for suspending
1600 if (bcmp(zb
, &scn
->scn_phys
.scn_bookmark
, sizeof (*zb
)) == 0 ||
1601 zb
->zb_object
> scn
->scn_phys
.scn_bookmark
.zb_object
) {
1602 dprintf("resuming at %llx/%llx/%llx/%llx\n",
1603 (longlong_t
)zb
->zb_objset
,
1604 (longlong_t
)zb
->zb_object
,
1605 (longlong_t
)zb
->zb_level
,
1606 (longlong_t
)zb
->zb_blkid
);
1607 bzero(&scn
->scn_phys
.scn_bookmark
, sizeof (*zb
));
1613 static void dsl_scan_visitbp(blkptr_t
*bp
, const zbookmark_phys_t
*zb
,
1614 dnode_phys_t
*dnp
, dsl_dataset_t
*ds
, dsl_scan_t
*scn
,
1615 dmu_objset_type_t ostype
, dmu_tx_t
*tx
);
1616 inline __attribute__((always_inline
)) static void dsl_scan_visitdnode(
1617 dsl_scan_t
*, dsl_dataset_t
*ds
, dmu_objset_type_t ostype
,
1618 dnode_phys_t
*dnp
, uint64_t object
, dmu_tx_t
*tx
);
1621 * Return nonzero on i/o error.
1622 * Return new buf to write out in *bufp.
1624 inline __attribute__((always_inline
)) static int
1625 dsl_scan_recurse(dsl_scan_t
*scn
, dsl_dataset_t
*ds
, dmu_objset_type_t ostype
,
1626 dnode_phys_t
*dnp
, const blkptr_t
*bp
,
1627 const zbookmark_phys_t
*zb
, dmu_tx_t
*tx
)
1629 dsl_pool_t
*dp
= scn
->scn_dp
;
1630 int zio_flags
= ZIO_FLAG_CANFAIL
| ZIO_FLAG_SCAN_THREAD
;
1633 if (BP_GET_LEVEL(bp
) > 0) {
1634 arc_flags_t flags
= ARC_FLAG_WAIT
;
1637 int epb
= BP_GET_LSIZE(bp
) >> SPA_BLKPTRSHIFT
;
1640 err
= arc_read(NULL
, dp
->dp_spa
, bp
, arc_getbuf_func
, &buf
,
1641 ZIO_PRIORITY_SCRUB
, zio_flags
, &flags
, zb
);
1643 scn
->scn_phys
.scn_errors
++;
1646 for (i
= 0, cbp
= buf
->b_data
; i
< epb
; i
++, cbp
++) {
1647 zbookmark_phys_t czb
;
1649 SET_BOOKMARK(&czb
, zb
->zb_objset
, zb
->zb_object
,
1651 zb
->zb_blkid
* epb
+ i
);
1652 dsl_scan_visitbp(cbp
, &czb
, dnp
,
1653 ds
, scn
, ostype
, tx
);
1655 arc_buf_destroy(buf
, &buf
);
1656 } else if (BP_GET_TYPE(bp
) == DMU_OT_DNODE
) {
1657 arc_flags_t flags
= ARC_FLAG_WAIT
;
1660 int epb
= BP_GET_LSIZE(bp
) >> DNODE_SHIFT
;
1663 if (BP_IS_PROTECTED(bp
)) {
1664 ASSERT3U(BP_GET_COMPRESS(bp
), ==, ZIO_COMPRESS_OFF
);
1665 zio_flags
|= ZIO_FLAG_RAW
;
1668 err
= arc_read(NULL
, dp
->dp_spa
, bp
, arc_getbuf_func
, &buf
,
1669 ZIO_PRIORITY_SCRUB
, zio_flags
, &flags
, zb
);
1671 scn
->scn_phys
.scn_errors
++;
1674 for (i
= 0, cdnp
= buf
->b_data
; i
< epb
;
1675 i
+= cdnp
->dn_extra_slots
+ 1,
1676 cdnp
+= cdnp
->dn_extra_slots
+ 1) {
1677 dsl_scan_visitdnode(scn
, ds
, ostype
,
1678 cdnp
, zb
->zb_blkid
* epb
+ i
, tx
);
1681 arc_buf_destroy(buf
, &buf
);
1682 } else if (BP_GET_TYPE(bp
) == DMU_OT_OBJSET
) {
1683 arc_flags_t flags
= ARC_FLAG_WAIT
;
1687 err
= arc_read(NULL
, dp
->dp_spa
, bp
, arc_getbuf_func
, &buf
,
1688 ZIO_PRIORITY_SCRUB
, zio_flags
, &flags
, zb
);
1690 scn
->scn_phys
.scn_errors
++;
1696 dsl_scan_visitdnode(scn
, ds
, osp
->os_type
,
1697 &osp
->os_meta_dnode
, DMU_META_DNODE_OBJECT
, tx
);
1699 if (OBJSET_BUF_HAS_USERUSED(buf
)) {
1701 * We also always visit user/group/project accounting
1702 * objects, and never skip them, even if we are
1703 * suspending. This is necessary so that the
1704 * space deltas from this txg get integrated.
1706 if (OBJSET_BUF_HAS_PROJECTUSED(buf
))
1707 dsl_scan_visitdnode(scn
, ds
, osp
->os_type
,
1708 &osp
->os_projectused_dnode
,
1709 DMU_PROJECTUSED_OBJECT
, tx
);
1710 dsl_scan_visitdnode(scn
, ds
, osp
->os_type
,
1711 &osp
->os_groupused_dnode
,
1712 DMU_GROUPUSED_OBJECT
, tx
);
1713 dsl_scan_visitdnode(scn
, ds
, osp
->os_type
,
1714 &osp
->os_userused_dnode
,
1715 DMU_USERUSED_OBJECT
, tx
);
1717 arc_buf_destroy(buf
, &buf
);
1723 inline __attribute__((always_inline
)) static void
1724 dsl_scan_visitdnode(dsl_scan_t
*scn
, dsl_dataset_t
*ds
,
1725 dmu_objset_type_t ostype
, dnode_phys_t
*dnp
,
1726 uint64_t object
, dmu_tx_t
*tx
)
1730 for (j
= 0; j
< dnp
->dn_nblkptr
; j
++) {
1731 zbookmark_phys_t czb
;
1733 SET_BOOKMARK(&czb
, ds
? ds
->ds_object
: 0, object
,
1734 dnp
->dn_nlevels
- 1, j
);
1735 dsl_scan_visitbp(&dnp
->dn_blkptr
[j
],
1736 &czb
, dnp
, ds
, scn
, ostype
, tx
);
1739 if (dnp
->dn_flags
& DNODE_FLAG_SPILL_BLKPTR
) {
1740 zbookmark_phys_t czb
;
1741 SET_BOOKMARK(&czb
, ds
? ds
->ds_object
: 0, object
,
1742 0, DMU_SPILL_BLKID
);
1743 dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp
),
1744 &czb
, dnp
, ds
, scn
, ostype
, tx
);
/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
1753 dsl_scan_visitbp(blkptr_t
*bp
, const zbookmark_phys_t
*zb
,
1754 dnode_phys_t
*dnp
, dsl_dataset_t
*ds
, dsl_scan_t
*scn
,
1755 dmu_objset_type_t ostype
, dmu_tx_t
*tx
)
1757 dsl_pool_t
*dp
= scn
->scn_dp
;
1758 blkptr_t
*bp_toread
= NULL
;
1760 if (dsl_scan_check_suspend(scn
, zb
))
1763 if (dsl_scan_check_resume(scn
, dnp
, zb
))
1766 scn
->scn_visited_this_txg
++;
	/*
	 * This debugging is commented out to conserve stack space. This
	 * function is called recursively and the debugging adds several
	 * bytes to the stack for each call. It can be commented back in
	 * if required to debug an issue in dsl_scan_visitbp().
	 *
	 * dprintf_bp(bp,
	 *     "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	 *     ds, ds ? ds->ds_object : 0,
	 *     zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	 *     bp);
	 */
1781 if (BP_IS_HOLE(bp
)) {
1782 scn
->scn_holes_this_txg
++;
1786 if (bp
->blk_birth
<= scn
->scn_phys
.scn_cur_min_txg
) {
1787 scn
->scn_lt_min_this_txg
++;
1791 bp_toread
= kmem_alloc(sizeof (blkptr_t
), KM_SLEEP
);
1794 if (dsl_scan_recurse(scn
, ds
, ostype
, dnp
, bp_toread
, zb
, tx
) != 0)
1798 * If dsl_scan_ddt() has already visited this block, it will have
1799 * already done any translations or scrubbing, so don't call the
1802 if (ddt_class_contains(dp
->dp_spa
,
1803 scn
->scn_phys
.scn_ddt_class_max
, bp
)) {
1804 scn
->scn_ddt_contained_this_txg
++;
1809 * If this block is from the future (after cur_max_txg), then we
1810 * are doing this on behalf of a deleted snapshot, and we will
1811 * revisit the future block on the next pass of this dataset.
1812 * Don't scan it now unless we need to because something
1813 * under it was modified.
1815 if (BP_PHYSICAL_BIRTH(bp
) > scn
->scn_phys
.scn_cur_max_txg
) {
1816 scn
->scn_gt_max_this_txg
++;
1820 scan_funcs
[scn
->scn_phys
.scn_func
](dp
, bp
, zb
);
1823 kmem_free(bp_toread
, sizeof (blkptr_t
));
1827 dsl_scan_visit_rootbp(dsl_scan_t
*scn
, dsl_dataset_t
*ds
, blkptr_t
*bp
,
1830 zbookmark_phys_t zb
;
1831 scan_prefetch_ctx_t
*spc
;
1833 SET_BOOKMARK(&zb
, ds
? ds
->ds_object
: DMU_META_OBJSET
,
1834 ZB_ROOT_OBJECT
, ZB_ROOT_LEVEL
, ZB_ROOT_BLKID
);
1836 if (ZB_IS_ZERO(&scn
->scn_phys
.scn_bookmark
)) {
1837 SET_BOOKMARK(&scn
->scn_prefetch_bookmark
,
1838 zb
.zb_objset
, 0, 0, 0);
1840 scn
->scn_prefetch_bookmark
= scn
->scn_phys
.scn_bookmark
;
1843 scn
->scn_objsets_visited_this_txg
++;
1845 spc
= scan_prefetch_ctx_create(scn
, NULL
, FTAG
);
1846 dsl_scan_prefetch(spc
, bp
, &zb
);
1847 scan_prefetch_ctx_rele(spc
, FTAG
);
1849 dsl_scan_visitbp(bp
, &zb
, NULL
, ds
, scn
, DMU_OST_NONE
, tx
);
1851 dprintf_ds(ds
, "finished scan%s", "");

static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about.  However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn_phys->scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn_phys->scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	}
}

/*
 * Invoked when a dataset is destroyed. We need to make sure that:
 *
 * 1) If it is the dataset that was currently being scanned, we write
 *	a new dsl_scan_phys_t and mark the objset reference in it
 *	as destroyed.
 * 2) Remove it from the work queue, if it was present.
 *
 * If the dataset was actually a snapshot, instead of marking the dataset
 * as destroyed, we instead substitute the next snapshot in line.
 */
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_destroyed_scn_phys(ds, &scn->scn_phys);
	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		if (ds->ds_is_snapshot)
			scan_ds_queue_insert(scn,
			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds->ds_object) {
		scn_bookmark->zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
}

/*
 * Called when a dataset is snapshotted. If we were currently traversing
 * this snapshot, we reset our bookmark to point at the newly created
 * snapshot. We also modify our work queue to remove the old snapshot and
 * replace it with the new one.
 */
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
    zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds1->ds_object) {
		scn_bookmark->zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
		scn_bookmark->zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}
}

/*
 * Called when a parent dataset and its clone are swapped. If we were
 * currently traversing the dataset, we need to switch to traversing the
 * newly promoted parent.
 */
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds1->ds_object);
		scan_ds_queue_insert(scn, ds2->ds_object, mintxg);
	}
	if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds2->ds_object);
		scan_ds_queue_insert(scn, ds1->ds_object, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	uint64_t originobj = *(uint64_t *)arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *	cur_min_txg is no less than ds_creation_txg.
		 *	We have already visited these blocks.
		 * or
		 *	birth > scn_max_txg
		 *	The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (!dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    (void) zap_cursor_advance(&zc)) {
				scan_ds_queue_insert(scn,
				    zfs_strtonum(za.za_name, NULL),
				    dsl_dataset_phys(ds)->ds_creation_txg);
			}
			zap_cursor_fini(&zc);
		} else {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &ds->ds_object,
			    DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };
	int p;

	if (!dsl_scan_is_running(scn))
		return;

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde;
	int error;
	uint64_t n = 0;

	bzero(&dde, sizeof (ddt_entry_t));

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
	    "suspending=%u", (longlong_t)n,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}
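
/*
 * Illustrative example of the clamp above (hypothetical numbers, not taken
 * from any real pool): for a scrub whose scn_max_txg is 5000, a snapshot
 * created at txg 1000 gets a maxtxg of MIN(5000, 1000) = 1000 from
 * dsl_scan_ds_maxtxg(), since a snapshot cannot reference blocks born after
 * its own creation txg; a head (non-snapshot) dataset keeps the full 5000.
 */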

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scan_ds_t *sds;
	dsl_pool_t *dp = scn->scn_dp;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_suspending)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_suspending)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, NULL, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_suspending);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
		/*
		 * If we were suspended, continue from here. Note if the
		 * ds we were suspended on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/*
	 * In case we suspended right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/*
	 * Keep pulling things out of the dataset avl queue. Updates to the
	 * persistent zap-object-as-queue happen only at checkpoints.
	 */
	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
		dsl_dataset_t *ds;
		uint64_t dsobj = sds->sds_dsobj;
		uint64_t txg = sds->sds_txg;

		/* dequeue and free the ds from the queue */
		scan_ds_queue_remove(scn, dsobj);

		/* set up min / max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (txg != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg, txg);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}
	/* No more objsets to fetch, we're done */
	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
	ASSERT0(scn->scn_suspending);
}

static uint64_t
dsl_scan_count_leaves(vdev_t *vd)
{
	uint64_t i, leaves = 0;

	/* we only count leaves that belong to the main pool and are readable */
	if (vd->vdev_islog || vd->vdev_isspare ||
	    vd->vdev_isl2cache || !vdev_readable(vd))
		return (0);

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (i = 0; i < vd->vdev_children; i++) {
		leaves += dsl_scan_count_leaves(vd->vdev_child[i]);
	}

	return (leaves);
}

static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
	int i;
	uint64_t cur_size = 0;

	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
	}

	q->q_total_zio_size_this_txg += cur_size;
	q->q_zios_this_txg++;
}

static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
    uint64_t end)
{
	q->q_total_seg_size_this_txg += end - start;
	q->q_segs_this_txg++;
}

static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
	/* See comment in dsl_scan_check_suspend() */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	return ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}
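
/*
 * Worked example of the suspend check above (hypothetical tunable values,
 * for illustration only): with zfs_scrub_min_time_ms = 1000, a queue run
 * that has spent 1.5 seconds of this sync pass issuing I/O suspends as soon
 * as dirty data reaches zfs_vdev_async_write_active_min_dirty_percent, the
 * txg sync thread is waiting, or the overall sync has run for at least
 * zfs_txg_timeout seconds; a pool that is shutting down suspends regardless
 * of elapsed time.
 */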

/*
 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
 * disk. This consumes the io_list and frees the scan_io_t's. This is
 * called when emptying queues, either when we're up against the memory
 * limit or when we have finished scanning. Returns B_TRUE if we stopped
 * processing the list before we finished. Any sios that were not issued
 * will remain in the io_list.
 */
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	int64_t bytes_issued = 0;
	boolean_t suspended = B_FALSE;

	while ((sio = list_head(io_list)) != NULL) {
		blkptr_t bp;

		if (scan_io_queue_check_suspend(scn)) {
			suspended = B_TRUE;
			break;
		}

		sio2bp(sio, &bp, queue->q_vd->vdev_id);
		bytes_issued += sio->sio_asize;
		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
		    &sio->sio_zb, queue);
		(void) list_remove_head(io_list);
		scan_io_queues_update_zio_stats(queue, &bp);
		kmem_cache_free(sio_cache, sio);
	}

	atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);

	return (suspended);
}

/*
 * This function removes sios from an IO queue which reside within a given
 * range_seg_t and inserts them (in offset order) into a list. Note that
 * we only ever return a maximum of 32 sios at once. If there are more sios
 * to process within this segment that did not make it onto the list we
 * return B_TRUE and otherwise B_FALSE.
 */
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
	scan_io_t srch_sio, *sio, *next_sio;
	avl_index_t idx;
	uint_t num_sios = 0;
	int64_t bytes_issued = 0;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	srch_sio.sio_offset = rs->rs_start;

	/*
	 * The exact start of the extent might not contain any matching zios,
	 * so if that's the case, examine the next one in the tree.
	 */
	sio = avl_find(&queue->q_sios_by_addr, &srch_sio, &idx);
	if (sio == NULL)
		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);

	while (sio != NULL && sio->sio_offset < rs->rs_end && num_sios <= 32) {
		ASSERT3U(sio->sio_offset, >=, rs->rs_start);
		ASSERT3U(sio->sio_offset + sio->sio_asize, <=, rs->rs_end);

		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
		avl_remove(&queue->q_sios_by_addr, sio);

		bytes_issued += sio->sio_asize;
		num_sios++;
		list_insert_tail(list, sio);
		sio = next_sio;
	}

	/*
	 * We limit the number of sios we process at once to 32 to avoid
	 * biting off more than we can chew. If we didn't take everything
	 * in the segment we update it to reflect the work we were able to
	 * complete. Otherwise, we remove it from the range tree entirely.
	 */
	if (sio != NULL && sio->sio_offset < rs->rs_end) {
		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
		    -bytes_issued);
		range_tree_resize_segment(queue->q_exts_by_addr, rs,
		    sio->sio_offset, rs->rs_end - sio->sio_offset);

		return (B_TRUE);
	} else {
		range_tree_remove(queue->q_exts_by_addr, rs->rs_start,
		    rs->rs_end - rs->rs_start);
		return (B_FALSE);
	}
}

/*
 * This is called from the queue emptying thread and selects the next
 * extent from which we are to issue I/Os. The behavior of this function
 * depends on the state of the scan, the current memory consumption and
 * whether or not we are performing a scan shutdown.
 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
 *	needs to perform a checkpoint
 * 2) We select the largest available extent if we are up against the
 *	memory limit.
 * 3) Otherwise we don't select any extents.
 */
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
	ASSERT(scn->scn_is_sorted);

	/* handle tunable overrides */
	if (scn->scn_checkpointing || scn->scn_clearing) {
		if (zfs_scan_issue_strategy == 1) {
			return (range_tree_first(queue->q_exts_by_addr));
		} else if (zfs_scan_issue_strategy == 2) {
			return (avl_first(&queue->q_exts_by_size));
		}
	}

	/*
	 * During normal clearing, we want to issue our largest segments
	 * first, keeping IO as sequential as possible, and leaving the
	 * smaller extents for later with the hope that they might eventually
	 * grow to larger sequential segments. However, when the scan is
	 * checkpointing, no new extents will be added to the sorting queue,
	 * so the way we are sorted now is as good as it will ever get.
	 * In this case, we instead switch to issuing extents in LBA order.
	 */
	if (scn->scn_checkpointing) {
		return (range_tree_first(queue->q_exts_by_addr));
	} else if (scn->scn_clearing) {
		return (avl_first(&queue->q_exts_by_size));
	} else {
		return (NULL);
	}
}

static void
scan_io_queues_run_one(void *arg)
{
	dsl_scan_io_queue_t *queue = arg;
	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
	boolean_t suspended = B_FALSE;
	range_seg_t *rs = NULL;
	scan_io_t *sio = NULL;
	list_t sio_list;
	uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
	uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd);

	ASSERT(queue->q_scn->scn_is_sorted);

	list_create(&sio_list, sizeof (scan_io_t),
	    offsetof(scan_io_t, sio_nodes.sio_list_node));
	mutex_enter(q_lock);

	/* calculate maximum in-flight bytes for this txg (min 1MB) */
	queue->q_maxinflight_bytes =
	    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);

	/* reset per-queue scan statistics for this txg */
	queue->q_total_seg_size_this_txg = 0;
	queue->q_segs_this_txg = 0;
	queue->q_total_zio_size_this_txg = 0;
	queue->q_zios_this_txg = 0;

	/* loop until we run out of time or sios */
	while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
		uint64_t seg_start = 0, seg_end = 0;
		boolean_t more_left = B_TRUE;

		ASSERT(list_is_empty(&sio_list));

		/* loop while we still have sios left to process in this rs */
		while (more_left) {
			scan_io_t *first_sio, *last_sio;

			/*
			 * We have selected which extent needs to be
			 * processed next. Gather up the corresponding sios.
			 */
			more_left = scan_io_queue_gather(queue, rs, &sio_list);
			ASSERT(!list_is_empty(&sio_list));
			first_sio = list_head(&sio_list);
			last_sio = list_tail(&sio_list);

			seg_end = last_sio->sio_offset + last_sio->sio_asize;
			if (seg_start == 0)
				seg_start = first_sio->sio_offset;

			/*
			 * Issuing sios can take a long time so drop the
			 * queue lock. The sio queue won't be updated by
			 * other threads since we're in syncing context so
			 * we can be sure that our trees will remain exactly
			 * as we left them.
			 */
			mutex_exit(q_lock);
			suspended = scan_io_queue_issue(queue, &sio_list);
			mutex_enter(q_lock);

			if (suspended)
				break;
		}
		/* update statistics for debugging purposes */
		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);

		if (suspended)
			break;
	}

	/*
	 * If we were suspended in the middle of processing,
	 * requeue any unfinished sios and exit.
	 */
	while ((sio = list_head(&sio_list)) != NULL) {
		list_remove(&sio_list, sio);
		scan_io_queue_insert_impl(queue, sio);
	}

	mutex_exit(q_lock);
	list_destroy(&sio_list);
}
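
/*
 * Illustrative arithmetic for the q_maxinflight_bytes calculation in
 * scan_io_queues_run_one() above (hypothetical tunable values, for
 * illustration only): with zfs_scan_vdev_limit = 4 MiB and a top-level
 * vdev made of 8 readable leaves, the per-queue cap is 8 * 4 MiB = 32 MiB
 * of scrub I/O in flight for that vdev; if none of the leaves are
 * currently readable, nr_leaves is 0 and the 1 MiB floor applies instead.
 */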

/*
 * Performs an emptying run on all scan queues in the pool. This just
 * punches out one thread per top-level vdev, each of which processes
 * only that vdev's scan queue. We can parallelize the I/O here because
 * we know that each queue's I/Os only affect its own top-level vdev.
 *
 * This function waits for the queue runs to complete, and must be
 * called from dsl_scan_sync (or in general, syncing context).
 */
static void
scan_io_queues_run(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(scn->scn_is_sorted);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (scn->scn_bytes_pending == 0)
		return;

	if (scn->scn_taskq == NULL) {
		int nthreads = spa->spa_root_vdev->vdev_children;

		/*
		 * We need to make this taskq *always* execute as many
		 * threads in parallel as we have top-level vdevs and no
		 * less, otherwise strange serialization of the calls to
		 * scan_io_queues_run_one can occur during spa_sync runs
		 * and that significantly impacts performance.
		 */
		scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
		    minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];

		mutex_enter(&vd->vdev_scan_io_queue_lock);
		if (vd->vdev_scan_io_queue != NULL) {
			VERIFY(taskq_dispatch(scn->scn_taskq,
			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
			    TQ_SLEEP) != TASKQID_INVALID);
		}
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * Wait for the queues to finish issuing their IOs for this run
	 * before we return. There may still be IOs in flight at this
	 * point.
	 */
	taskq_wait(scn->scn_taskq);
}

static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_async_block_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t i;
	uint64_t seg_size_total = 0, zio_size_total = 0;
	uint64_t seg_count_total = 0, zio_count_total = 0;

	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;

		if (queue == NULL)
			continue;

		seg_size_total += queue->q_total_seg_size_this_txg;
		zio_size_total += queue->q_total_zio_size_this_txg;
		seg_count_total += queue->q_segs_this_txg;
		zio_count_total += queue->q_zios_this_txg;
	}

	if (seg_count_total == 0 || zio_count_total == 0) {
		scn->scn_avg_seg_size_this_txg = 0;
		scn->scn_avg_zio_size_this_txg = 0;
		scn->scn_segs_this_txg = 0;
		scn->scn_zios_this_txg = 0;
		return;
	}

	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
	scn->scn_segs_this_txg = seg_count_total;
	scn->scn_zios_this_txg = zio_count_total;
}

static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;
	const dva_t *dva = &bp->blk_dva[0];

	if (dsl_scan_async_block_should_pause(scn))
		return (SET_ERROR(ERESTART));

	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
	    DVA_GET_ASIZE(dva), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	vdev_t *vd;

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops == &vdev_indirect_ops) {
		/*
		 * The indirect vdev can point to multiple
		 * vdevs.  For simplicity, always create
		 * the resilver zio_t. zio_vdev_io_start()
		 * will bypass the child resilver i/o's if
		 * they are on vdevs that don't have DTL's.
		 */
		return (B_TRUE);
	}

	if (DVA_GET_GANG(dva)) {
		/*
		 * Gang members may be spread across multiple
		 * vdevs, so the best estimate we have is the
		 * scrub range, which has already been checked.
		 * XXX -- it would be better to change our
		 * allocation policy to ensure that all
		 * gang members reside on the same vdev.
		 */
		return (B_TRUE);
	}

	/*
	 * Check if the txg falls within the range which must be
	 * resilvered.  DVAs outside this range can always be skipped.
	 */
	if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
		return (B_FALSE);

	/*
	 * Check if the top-level vdev must resilver this offset.
	 * When the offset does not intersect with a dirty leaf DTL
	 * then it may be possible to skip the resilver IO.  The psize
	 * is provided instead of asize to simplify the check for RAIDZ.
	 */
	if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize))
		return (B_FALSE);

	return (B_TRUE);
}

static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	if (spa_suspend_async_destroy(spa))
		return (0);

	if (zfs_free_bpobj_enabled &&
	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf.  Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed.  This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return (err);
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}

	return (0);
}

/*
 * This is the primary entry point for scans that is called from syncing
 * context. Scans must happen entirely during syncing context so that we
 * can guarantee that blocks we are currently scanning will not change out
 * from under us. While a scan is active, this function controls how quickly
 * transaction groups proceed, instead of the normal handling provided by
 * txg_sync_thread().
 */
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	int err = 0;
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	state_sync_type_t sync_type = SYNC_OPTIONAL;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (dsl_scan_restarting(scn, tx)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, (longlong_t)tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	/* reset scan statistics */
	scn->scn_visited_this_txg = 0;
	scn->scn_holes_this_txg = 0;
	scn->scn_lt_min_this_txg = 0;
	scn->scn_gt_max_this_txg = 0;
	scn->scn_ddt_contained_this_txg = 0;
	scn->scn_objsets_visited_this_txg = 0;
	scn->scn_avg_seg_size_this_txg = 0;
	scn->scn_segs_this_txg = 0;
	scn->scn_avg_zio_size_this_txg = 0;
	scn->scn_zios_this_txg = 0;
	scn->scn_suspending = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys. If we suspend, don't do
	 * any scrubbing or resilvering. This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it. It is also faster to free the
	 * blocks than to scrub them.
	 */
	err = dsl_process_async_destroys(dp, tx);
	if (err != 0)
		return;

	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
		return;

	/*
	 * Wait a few txgs after importing to begin scanning so that
	 * we can get the pool imported quickly.
	 */
	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
		return;

	/*
	 * It is possible to switch from unsorted to sorted at any time,
	 * but afterwards the scan will remain sorted unless reloaded from
	 * a checkpoint after a reboot.
	 */
	if (!zfs_scan_legacy) {
		scn->scn_is_sorted = B_TRUE;
		if (scn->scn_last_checkpoint == 0)
			scn->scn_last_checkpoint = ddi_get_lbolt();
	}

	/*
	 * For sorted scans, determine what kind of work we will be doing
	 * this txg based on our memory limitations and whether or not we
	 * need to perform a checkpoint.
	 */
	if (scn->scn_is_sorted) {
		/*
		 * If we are over our checkpoint interval, set scn_clearing
		 * so that we can begin checkpointing immediately. The
		 * checkpoint allows us to save a consistent bookmark
		 * representing how much data we have scrubbed so far.
		 * Otherwise, use the memory limit to determine if we should
		 * scan for metadata or start issuing scrub IOs. We accumulate
		 * metadata until we hit our hard memory limit at which point
		 * we issue scrub IOs until we are at our soft memory limit.
		 */
		if (scn->scn_checkpointing ||
		    ddi_get_lbolt() - scn->scn_last_checkpoint >
		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
			if (!scn->scn_checkpointing)
				zfs_dbgmsg("begin scan checkpoint");

			scn->scn_checkpointing = B_TRUE;
			scn->scn_clearing = B_TRUE;
		} else {
			boolean_t should_clear = dsl_scan_should_clear(scn);
			if (should_clear && !scn->scn_clearing) {
				zfs_dbgmsg("begin scan clearing");
				scn->scn_clearing = B_TRUE;
			} else if (!should_clear && scn->scn_clearing) {
				zfs_dbgmsg("finish scan clearing");
				scn->scn_clearing = B_FALSE;
			}
		}
	} else {
		ASSERT0(scn->scn_checkpointing);
		ASSERT0(scn->scn_clearing);
	}

	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
		/* Need to scan metadata for more blocks to scrub */
		dsl_scan_phys_t *scnp = &scn->scn_phys;
		taskqid_t prefetch_tqid;
		uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
		uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev);

		/*
		 * Recalculate the max number of in-flight bytes for pool-wide
		 * scanning operations (minimum 1MB). Limits for the issuing
		 * phase are done per top-level vdev and are handled separately.
		 */
		scn->scn_maxinflight_bytes =
		    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);

		if (scnp->scn_ddt_bookmark.ddb_class <=
		    scnp->scn_ddt_class_max) {
			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
			zfs_dbgmsg("doing scan sync txg %llu; "
			    "ddt bm=%llu/%llu/%llu/%llx",
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		} else {
			zfs_dbgmsg("doing scan sync txg %llu; "
			    "bm=%llu/%llu/%llu/%llu",
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_bookmark.zb_objset,
			    (longlong_t)scnp->scn_bookmark.zb_object,
			    (longlong_t)scnp->scn_bookmark.zb_level,
			    (longlong_t)scnp->scn_bookmark.zb_blkid);
		}

		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);

		scn->scn_prefetch_stop = B_FALSE;
		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
		ASSERT(prefetch_tqid != TASKQID_INVALID);

		dsl_pool_config_enter(dp, FTAG);
		dsl_scan_visit(scn, tx);
		dsl_pool_config_exit(dp, FTAG);

		mutex_enter(&dp->dp_spa->spa_scrub_lock);
		scn->scn_prefetch_stop = B_TRUE;
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&dp->dp_spa->spa_scrub_lock);

		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		zfs_dbgmsg("scan visited %llu blocks in %llums "
		    "(%llu os's, %llu holes, %llu < mintxg, "
		    "%llu in ddt, %llu > maxtxg)",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_objsets_visited_this_txg,
		    (longlong_t)scn->scn_holes_this_txg,
		    (longlong_t)scn->scn_lt_min_this_txg,
		    (longlong_t)scn->scn_ddt_contained_this_txg,
		    (longlong_t)scn->scn_gt_max_this_txg);

		if (!scn->scn_suspending) {
			ASSERT0(avl_numnodes(&scn->scn_queue));
			scn->scn_done_txg = tx->tx_txg + 1;
			if (scn->scn_is_sorted) {
				scn->scn_checkpointing = B_TRUE;
				scn->scn_clearing = B_TRUE;
			}
			zfs_dbgmsg("scan complete txg %llu",
			    (longlong_t)tx->tx_txg);
		}
	} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
		/* need to issue scrubbing IOs from per-vdev queues */
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);
		scan_io_queues_run(scn);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		/* calculate and dprintf the current memory usage */
		(void) dsl_scan_should_clear(scn);
		dsl_scan_update_stats(scn);

		zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums "
		    "(avg_block_size = %llu, avg_seg_size = %llu)",
		    (longlong_t)scn->scn_zios_this_txg,
		    (longlong_t)scn->scn_segs_this_txg,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_avg_zio_size_this_txg,
		    (longlong_t)scn->scn_avg_seg_size_this_txg);
	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
		/* Finished with everything. Mark the scrub as complete */
		zfs_dbgmsg("scan issuing complete txg %llu",
		    (longlong_t)tx->tx_txg);
		ASSERT3U(scn->scn_done_txg, !=, 0);
		ASSERT0(spa->spa_scrub_inflight);
		ASSERT0(scn->scn_bytes_pending);
		dsl_scan_done(scn, B_TRUE, tx);
		sync_type = SYNC_MANDATORY;
	}

	dsl_scan_sync_state(scn, tx, sync_type);
}

static void
count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/* update the spa's stats on how many bytes we have issued */
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
		    DVA_GET_ASIZE(&bp->blk_dva[i]));
	}

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	mutex_enter(&zab->zab_lock);

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		int equal;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}

	mutex_exit(&zab->zab_lock);
}

static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
	avl_index_t idx;
	int64_t asize = sio->sio_asize;
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
		/* block is already scheduled for reading */
		atomic_add_64(&scn->scn_bytes_pending, -asize);
		kmem_cache_free(sio_cache, sio);
		return;
	}
	avl_insert(&queue->q_sios_by_addr, sio, idx);
	range_tree_add(queue->q_exts_by_addr, sio->sio_offset, asize);
}

/*
 * Given all the info we got from our metadata scanning process, we
 * construct a scan_io_t and insert it into the scan sorting queue. The
 * I/O must already be suitable for us to process. This is controlled
 * by dsl_scan_enqueue().
 */
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
    int zio_flags, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio = kmem_cache_alloc(sio_cache, KM_SLEEP);

	ASSERT0(BP_IS_GANG(bp));
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	bp2sio(bp, sio, dva_i);
	sio->sio_flags = zio_flags;
	sio->sio_zb = *zb;

	/*
	 * Increment the bytes pending counter now so that we can't
	 * get an integer underflow in case the worker processes the
	 * zio before we get to incrementing this counter.
	 */
	atomic_add_64(&scn->scn_bytes_pending, sio->sio_asize);

	scan_io_queue_insert_impl(queue, sio);
}

/*
 * Given a set of I/O parameters as discovered by the metadata traversal
 * process, attempts to place the I/O into the sorted queues (if allowed),
 * or immediately executes the I/O.
 */
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb)
{
	spa_t *spa = dp->dp_spa;

	ASSERT(!BP_IS_EMBEDDED(bp));

	/*
	 * Gang blocks are hard to issue sequentially, so we just issue them
	 * here immediately instead of queuing them.
	 */
	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
		scan_exec_io(dp, bp, zio_flags, zb, NULL);
		return;
	}

	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		dva_t dva;
		vdev_t *vdev;

		dva = bp->blk_dva[i];
		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
		ASSERT(vdev != NULL);

		mutex_enter(&vdev->vdev_scan_io_queue_lock);
		if (vdev->vdev_scan_io_queue == NULL)
			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
		ASSERT(dp->dp_scan != NULL);
		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
		    i, zio_flags, zb);
		mutex_exit(&vdev->vdev_scan_io_queue_lock);
	}
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	size_t psize = BP_GET_PSIZE(bp);
	boolean_t needs_io = B_FALSE;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg) {
		count_block(scn, dp->dp_blkstats, bp);
		return (0);
	}

	/* Embedded BP's have phys_birth==0, so we reject them above. */
	ASSERT(!BP_IS_EMBEDDED(bp));

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io)
			needs_io = dsl_scan_need_resilver(spa, dva, psize,
			    phys_birth);
	}

	if (needs_io && !zfs_no_scrub_io) {
		dsl_scan_enqueue(dp, bp, zio_flags, zb);
	} else {
		count_block(scn, dp->dp_blkstats, bp);
	}

	/* do not relocate this block */
	return (0);
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	dsl_scan_io_queue_t *queue = zio->io_private;

	abd_free(zio->io_abd);

	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
		cv_broadcast(&queue->q_zio_cv);
		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
	}

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
	}
}

/*
 * Given a scanning zio's information, executes the zio. The zio need
 * not necessarily be only sortable, this function simply executes the
 * zio, no matter what it is. The optional queue argument allows the
 * caller to specify that they want per top level vdev IO rate limiting
 * instead of the legacy global limiting.
 */
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	abd_t *data = abd_alloc_for_io(size, B_FALSE);

	ASSERT3U(scn->scn_maxinflight_bytes, >, 0);

	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;

		mutex_enter(q_lock);
		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
			cv_wait(&queue->q_zio_cv, q_lock);
		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
		mutex_exit(q_lock);
	}

	count_block(scn, dp->dp_blkstats, bp);
	zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size,
	    dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
/*
 * This is the primary extent sorting algorithm. We balance two parameters:
 * 1) how many bytes of I/O are in an extent
 * 2) how well the extent is filled with I/O (as a fraction of its total size)
 * Since we allow extents to have gaps between their constituent I/Os, it's
 * possible to have a fairly large extent that contains the same amount of
 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
 * The algorithm sorts based on a score calculated from the extent's size,
 * the relative fill volume (in %) and a "fill weight" parameter that controls
 * the split between whether we prefer larger extents or more well populated
 * extents:
 *
 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
 *
 * Example:
 * 1) assume extsz = 64 MiB
 * 2) assume fill = 32 MiB (extent is half full)
 * 3) assume fill_weight = 3
 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
 *    SCORE = 32M + (50 * 3 * 32M) / 100
 *    SCORE = 32M + (4800M / 100)
 *    SCORE = 32M + 48M
 *             ^     ^
 *             |     +--- final total relative fill-based score
 *             +--------- final total fill-based score
 *    SCORE = 80M
 *
 * As can be seen, at fill_weight = 3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
 */
static int
ext_size_compare(const void *x, const void *y)
{
	const range_seg_t *rsa = x, *rsb = y;
	uint64_t sa = rsa->rs_end - rsa->rs_start,
	    sb = rsb->rs_end - rsb->rs_start;
	uint64_t score_a, score_b;

	score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
	    fill_weight * rsa->rs_fill) >> 7);
	score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
	    fill_weight * rsb->rs_fill) >> 7);

	if (score_a > score_b)
		return (-1);
	if (score_a == score_b) {
		if (rsa->rs_start < rsb->rs_start)
			return (-1);
		if (rsa->rs_start == rsb->rs_start)
			return (0);
		return (1);
	}
	return (1);
}
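/*
 * Illustrative sketch only, not part of the scan code: it recomputes the
 * score from the block comment above (extsz = 64 MiB, fill = 32 MiB,
 * fill_weight = 3), once with the exact "* 100 ... / 100" percentage form
 * and once with the "<< 7 ... >> 7" approximation used by
 * ext_size_compare(). The helpers score_exact(), score_shift() and the
 * stand-alone main() are hypothetical names introduced purely for this
 * demonstration, which is why the whole block is kept under #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
score_exact(uint64_t fill, uint64_t extsz, uint64_t weight)
{
	/* SCORE = fill + (fill-percent * weight * fill) / 100 */
	return (fill + ((fill * 100 / extsz) * weight * fill) / 100);
}

static uint64_t
score_shift(uint64_t fill, uint64_t extsz, uint64_t weight)
{
	/* Same score, with the *100 and /100 replaced by <<7 and >>7. */
	return (fill + ((((fill << 7) / extsz) * weight * fill) >> 7));
}

int
main(void)
{
	uint64_t mib = 1024ULL * 1024ULL;
	uint64_t extsz = 64 * mib, fill = 32 * mib, weight = 3;

	/* Both print 83886080 bytes (80 MiB) for this half-full extent. */
	printf("exact: %llu\n",
	    (unsigned long long)score_exact(fill, extsz, weight));
	printf("shift: %llu\n",
	    (unsigned long long)score_shift(fill, extsz, weight));
	return (0);
}
#endif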
/*
 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
 * based on LBA-order (from lowest to highest).
 */
static int
sio_addr_compare(const void *x, const void *y)
{
	const scan_io_t *a = x, *b = y;

	if (a->sio_offset < b->sio_offset)
		return (-1);
	if (a->sio_offset == b->sio_offset)
		return (0);
	return (1);
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);

	q->q_scn = scn;
	q->q_vd = vd;
	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
	q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops,
	    &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap);
	avl_create(&q->q_sios_by_addr, sio_addr_compare,
	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));

	return (q);
}
/*
 * Destroys a scan queue and all segments and scan_io_t's contained in it.
 * No further execution of I/O occurs, anything pending in the queue is
 * simply freed without being executed.
 */
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	void *cookie = NULL;
	int64_t bytes_dequeued = 0;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
	    NULL) {
		ASSERT(range_tree_contains(queue->q_exts_by_addr,
		    sio->sio_offset, sio->sio_asize));
		bytes_dequeued += sio->sio_asize;
		kmem_cache_free(sio_cache, sio);
	}

	atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
	range_tree_destroy(queue->q_exts_by_addr);
	avl_destroy(&queue->q_sios_by_addr);
	cv_destroy(&queue->q_zio_cv);

	kmem_free(queue, sizeof (*queue));
}
/*
 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
 * called on behalf of vdev_top_transfer when creating or destroying
 * a mirror vdev due to zpool attach/detach.
 */
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
	mutex_enter(&svd->vdev_scan_io_queue_lock);
	mutex_enter(&tvd->vdev_scan_io_queue_lock);

	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
	svd->vdev_scan_io_queue = NULL;
	if (tvd->vdev_scan_io_queue != NULL)
		tvd->vdev_scan_io_queue->q_vd = tvd;

	mutex_exit(&tvd->vdev_scan_io_queue_lock);
	mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		if (tvd->vdev_scan_io_queue != NULL)
			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
		tvd->vdev_scan_io_queue = NULL;
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;
	vdev_t *vdev;
	kmutex_t *q_lock;
	dsl_scan_io_queue_t *queue;
	scan_io_t srch, *sio;
	avl_index_t idx;
	uint64_t start, size;

	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
	ASSERT(vdev != NULL);
	q_lock = &vdev->vdev_scan_io_queue_lock;
	queue = vdev->vdev_scan_io_queue;

	mutex_enter(q_lock);
	if (queue == NULL) {
		mutex_exit(q_lock);
		return;
	}

	bp2sio(bp, &srch, dva_i);
	start = srch.sio_offset;
	size = srch.sio_asize;

	/*
	 * We can find the zio in two states:
	 * 1) Cold, just sitting in the queue of zio's to be issued at
	 *	some point in the future. In this case, all we do is
	 *	remove the zio from the q_sios_by_addr tree, decrement
	 *	its data volume from the containing range_seg_t and
	 *	resort the q_exts_by_size tree to reflect that the
	 *	range_seg_t has lost some of its 'fill'. We don't shorten
	 *	the range_seg_t - this is usually rare enough not to be
	 *	worth the extra hassle of trying to keep track of precise
	 *	extent boundaries.
	 * 2) Hot, where the zio is currently in-flight in
	 *	dsl_scan_issue_ios. In this case, we can't simply
	 *	reach in and stop the in-flight zio's, so we instead
	 *	block the caller. Eventually, dsl_scan_issue_ios will
	 *	be done with issuing the zio's it gathered and will
	 *	signal us.
	 */
	sio = avl_find(&queue->q_sios_by_addr, &srch, &idx);
	if (sio != NULL) {
		int64_t asize = sio->sio_asize;
		blkptr_t tmpbp;

		/* Got it while it was cold in the queue */
		ASSERT3U(start, ==, sio->sio_offset);
		ASSERT3U(size, ==, asize);
		avl_remove(&queue->q_sios_by_addr, sio);

		ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
		range_tree_remove_fill(queue->q_exts_by_addr, start, size);

		/*
		 * We only update scn_bytes_pending in the cold path,
		 * otherwise it will already have been accounted for as
		 * part of the zio's execution.
		 */
		atomic_add_64(&scn->scn_bytes_pending, -asize);

		/* count the block as though we issued it */
		sio2bp(sio, &tmpbp, dva_i);
		count_block(scn, dp->dp_blkstats, &tmpbp);

		kmem_cache_free(sio_cache, sio);
	}
	mutex_exit(q_lock);
}
/*
 * Callback invoked when a zio_free() zio is executing. This needs to be
 * intercepted to prevent the zio from deallocating a particular portion
 * of disk space and it then getting reallocated and written to, while we
 * still have it queued up for processing.
 */
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(scn != NULL);
	if (!dsl_scan_is_running(scn))
		return;

	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
		dsl_scan_freed_dva(spa, bp, i);
}
#if defined(_KERNEL)
module_param(zfs_scan_vdev_limit, ulong, 0644);
MODULE_PARM_DESC(zfs_scan_vdev_limit,
	"Max bytes in flight per leaf vdev for scrubs and resilvers");

module_param(zfs_scrub_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_scrub_min_time_ms, "Min millisecs to scrub per txg");

module_param(zfs_obsolete_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_obsolete_min_time_ms, "Min millisecs to obsolete per txg");

module_param(zfs_free_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_free_min_time_ms, "Min millisecs to free per txg");

module_param(zfs_resilver_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_resilver_min_time_ms, "Min millisecs to resilver per txg");

module_param(zfs_no_scrub_io, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");

module_param(zfs_no_scrub_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");

module_param(zfs_async_block_max_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_async_block_max_blocks,
	"Max number of blocks freed in one txg");

module_param(zfs_free_bpobj_enabled, int, 0644);
MODULE_PARM_DESC(zfs_free_bpobj_enabled, "Enable processing of the free_bpobj");

module_param(zfs_scan_mem_lim_fact, int, 0644);
MODULE_PARM_DESC(zfs_scan_mem_lim_fact, "Fraction of RAM for scan hard limit");

module_param(zfs_scan_issue_strategy, int, 0644);
MODULE_PARM_DESC(zfs_scan_issue_strategy,
	"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");

module_param(zfs_scan_legacy, int, 0644);
MODULE_PARM_DESC(zfs_scan_legacy, "Scrub using legacy non-sequential method");

module_param(zfs_scan_checkpoint_intval, int, 0644);
MODULE_PARM_DESC(zfs_scan_checkpoint_intval,
	"Scan progress on-disk checkpointing interval");

module_param(zfs_scan_max_ext_gap, ulong, 0644);
MODULE_PARM_DESC(zfs_scan_max_ext_gap,
	"Max gap in bytes between sequential scrub / resilver I/Os");

module_param(zfs_scan_mem_lim_soft_fact, int, 0644);
MODULE_PARM_DESC(zfs_scan_mem_lim_soft_fact,
	"Fraction of hard limit used as soft limit");

module_param(zfs_scan_strict_mem_lim, int, 0644);
MODULE_PARM_DESC(zfs_scan_strict_mem_lim,
	"Tunable to attempt to reduce lock contention");

module_param(zfs_scan_fill_weight, int, 0644);
MODULE_PARM_DESC(zfs_scan_fill_weight,
	"Tunable to adjust bias towards more filled segments during scans");