/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc_impl.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/range_tree.h>
#include <sys/zfs_vfsops.h>
/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/O's issued since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */
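/*
 * Illustrative sketch only: the function names below are the real ones
 * referenced above, but the pseudocode is a simplification of the actual
 * dsl_scan_sync() logic, not a verbatim excerpt:
 *
 *	every spa_sync():
 *		if (all metadata scanned || queues over the memory limit)
 *			scan_io_queues_run();	// issue sorted, queued I/O
 *		else
 *			dsl_scan_visit();	// scan more metadata
 */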
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(spa_t *spa);
static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);

extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
/*
 * 'zpool status' uses bytes processed per pass to report throughput and
 * estimate time remaining. We define a pass to start when the scanning
 * phase completes for a sequential resilver. Optionally, this value
 * may be used to reset the pass statistics every N txgs to provide an
 * estimated completion time based on currently observed performance.
 */
static uint_t zfs_scan_report_txgs = 0;
/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
static int zfs_scan_strict_mem_lim = B_FALSE;
/*
 * Maximum number of parallelly executed bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues to cause long latency,
 * which would cause long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
static uint64_t zfs_scan_vdev_limit = 16 << 20;
static uint_t zfs_scan_issue_strategy = 0;

/* don't queue & sort zios, go direct */
static int zfs_scan_legacy = B_FALSE;
static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */

/* fraction of physmem */
static uint_t zfs_scan_mem_lim_fact = 20;

/* fraction of mem lim above */
static uint_t zfs_scan_mem_lim_soft_fact = 20;

/* minimum milliseconds to scrub per txg */
static uint_t zfs_scrub_min_time_ms = 1000;

/* minimum milliseconds to obsolete per txg */
static uint_t zfs_obsolete_min_time_ms = 500;

/* minimum milliseconds to free per txg */
static uint_t zfs_free_min_time_ms = 1000;

/* minimum milliseconds to resilver per txg */
static uint_t zfs_resilver_min_time_ms = 3000;

static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;

/* max number of blocks to free in a single TXG */
static uint64_t zfs_async_block_max_blocks = UINT64_MAX;

/* max number of dedup blocks to free in a single TXG */
static uint64_t zfs_max_async_dedup_frees = 100000;

/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;
/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
 * Enable/disable the processing of the free_bpobj object.
 */
static int zfs_free_bpobj_enabled = 1;

/* Error blocks to be scrubbed in one txg. */
unsigned long zfs_scrub_error_blocks_per_txg = 1 << 12;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct scan_ds {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;
/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
 * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED,
} state_sync_type_t;
/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_nr_dvas;

	/* fields from zio_t */
	uint32_t		sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node; /* link into issuing queue */
		list_node_t	sio_list_node; /* link for issuing to disk */
	} sio_nodes;

	/*
	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
	 * depending on how many were in the original bp. Only the
	 * first DVA is really used for sorting and issuing purposes.
	 * The other DVAs (if provided) simply exist so that the zio
	 * layer can find additional copies to repair from in the
	 * event of an error. This array must go at the end of the
	 * struct to allow this for the variable number of elements.
	 */
	dva_t			sio_dva[];
} scan_io_t;

#define	SIO_SET_OFFSET(sio, x)	DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define	SIO_SET_ASIZE(sio, x)	DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define	SIO_GET_OFFSET(sio)	DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define	SIO_GET_ASIZE(sio)	DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define	SIO_GET_END_OFFSET(sio)		\
	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define	SIO_GET_MUSED(sio)		\
	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
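/*
 * Worked example (illustrative numbers): a scan_io_t carrying three DVAs
 * consumes sizeof (scan_io_t) + 3 * sizeof (dva_t) bytes, which is exactly
 * what SIO_GET_MUSED() reports and what q_sio_memused accumulates below.
 */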
struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
	vdev_t		*q_vd; /* top-level vdev that this queue represents */
	zio_t		*q_zio; /* scn_zio_root child for waiting on IO */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	zfs_btree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;
	uint64_t	q_sio_memused;
	uint64_t	q_last_ext_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_idatablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}

/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
	ASSERT3U(nr_dvas, >, 0);
	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);

	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time
	 */
	fill_weight = zfs_scan_fill_weight;

	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		char name[36];

		(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
		sio_cache[i] = kmem_cache_create(name,
		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
		    0, NULL, NULL, NULL, NULL, NULL, 0);
	}
}

void
scan_fini(void)
{
	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		kmem_cache_destroy(sio_cache[i]);
	}
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
	memset(bp, 0, sizeof (*bp));
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;

	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
	sio->sio_nr_dvas = BP_GET_NDVAS(bp);

	/*
	 * Copy the DVAs to the sio. We need all copies of the block so
	 * that the self healing code can use the alternate copies if the
	 * first is corrupted. We want the DVA at index dva_i to be first
	 * in the sio since this is the primary one that we want to issue.
	 */
	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
	}
}
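/*
 * Illustrative example: for a bp with 3 DVAs and dva_i == 1, the rotation
 * above stores the DVAs in the order [1, 2, 0], so DVA 1 becomes the
 * primary copy used by the SIO_* macros for sorting and issuing, while
 * DVAs 2 and 0 remain available to the zio layer for self healing.
 */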
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
	 * Limits for the issuing phase are done per top-level vdev and
	 * are handled separately.
	 */
	scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
	    zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));

	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress for %s; "
		    "restarting new-style scrub in txg %llu",
		    spa->spa_name,
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
		    ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);

		if (err != 0 && err != ENOENT)
			return (err);

		err = zap_lookup(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);

		/*
		 * Detect if the pool contains the signature of #2094. If it
		 * does properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				memcpy(&scn->scn_phys, zaptmp,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub for %s was modified "
			    "by old software; restarting in txg %llu",
			    spa->spa_name,
			    (longlong_t)scn->scn_restart_txg);
		} else if (dsl_scan_resilvering(dp)) {
			/*
			 * If a resilver is in progress and there are already
			 * errors, restart it instead of finishing this scan and
			 * then restarting it. If there haven't been any errors
			 * then remember that the incore DTL is valid.
			 */
			if (scn->scn_phys.scn_errors > 0) {
				scn->scn_restart_txg = txg;
				zfs_dbgmsg("resilver can't excise DTL_MISSING "
				    "when finished; restarting on %s in txg "
				    "%llu",
				    spa->spa_name,
				    (u_longlong_t)scn->scn_restart_txg);
			} else {
				/* it's safe to excise DTL when finished */
				spa->spa_scrub_started = B_TRUE;
			}
		}
	}

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	vdev_scan_stat_init(spa->spa_root_vdev);

	return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);

		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		scan_ds_prefetch_queue_clear(scn);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
	return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
	    (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
{
	dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;

	return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
	    errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
}
boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
{
	return (dsl_errorscrubbing(scn->scn_dp) &&
	    scn->errorscrub_phys.dep_paused_flags);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scn->errorscrub_phys.dep_cursor =
	    zap_cursor_serialize(&scn->errorscrub_cursor);

	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
	    &scn->errorscrub_phys, tx));
}
static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(!dsl_errorscrubbing(scn->scn_dp));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);

	memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
	scn->errorscrub_phys.dep_func = *funcp;
	scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
	scn->errorscrub_phys.dep_start_time = gethrestime_sec();
	scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
	scn->errorscrub_phys.dep_examined = 0;
	scn->errorscrub_phys.dep_errors = 0;
	scn->errorscrub_phys.dep_cursor = 0;
	zap_cursor_init_serialized(&scn->errorscrub_cursor,
	    spa->spa_meta_objset, spa->spa_errlog_last,
	    scn->errorscrub_phys.dep_cursor);

	vdev_config_dirty(spa->spa_root_vdev);
	spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);

	dsl_errorscrub_sync_state(scn, tx);

	spa_history_log_internal(spa, "error scrub setup", tx,
	    "func=%u mintxg=%u maxtxg=%llu",
	    *funcp, 0, (u_longlong_t)tx->tx_txg);
}
static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
		return (SET_ERROR(EBUSY));
	}

	if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
		return (ECANCELED);
	}
	return (0);
}
/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_queues_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
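/*
 * For example (informal restatement of case 1 above): if the dataset
 * recorded in the last sync'd bookmark is destroyed mid-scan,
 * dsl_scan_ds_destroyed() adjusts the cached record so that a SYNC_CACHED
 * sync rewrites the on-disk state to reference a dataset that still
 * exists, letting a later import resume the scan safely.
 */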
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
	if (scn->scn_queues_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
			    NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		memcpy(&scn->scn_phys_cached, &scn->scn_phys,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint for %s",
			    spa->spa_name);

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
	    dsl_errorscrubbing(scn->scn_dp))
		return (SET_ERROR(EBUSY));

	return (0);
}
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));

	/*
	 * If we are starting a fresh scrub, we erase the error scrub
	 * information from disk.
	 */
	memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
	dsl_errorscrub_sync_state(scn, tx);

	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);
	vdev_scan_stat_init(spa->spa_root_vdev);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			nvlist_t *aux = fnvlist_alloc();
			fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
			    "healing");
			spa_event_notify(spa, NULL, aux,
			    ESC_ZFS_RESILVER_START);
			nvlist_free(aux);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

		/*
		 * When starting a resilver clear any existing rebuild state.
		 * This is required to prevent stale rebuild status from
		 * being reported when a rebuild is run, then a resilver and
		 * finally a scrub. In which case only the scrub status
		 * should be reported by 'zpool status'.
		 */
		if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
			vdev_t *rvd = spa->spa_root_vdev;
			for (uint64_t i = 0; i < rvd->vdev_children; i++) {
				vdev_t *vd = rvd->vdev_child[i];
				vdev_rebuild_clear_sync(
				    (void *)(uintptr_t)vd->vdev_id, tx);
			}
		}
	}

	/* back to the generic stuff */

	if (zfs_scan_blkstats) {
		if (dp->dp_blkstats == NULL) {
			dp->dp_blkstats =
			    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		}
		memset(&dp->dp_blkstats->zab_type, 0,
		    sizeof (dp->dp_blkstats->zab_type));
	} else {
		if (dp->dp_blkstats) {
			vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
			dp->dp_blkstats = NULL;
		}
	}

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
	    (u_longlong_t)scn->scn_phys.scn_max_txg);
}
/*
 * Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
 * error scrub or resilver. Can also be called to resume a paused scrub or
 * error scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_RESILVER) {
		dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
		return (0);
	}

	if (func == POOL_SCAN_ERRORSCRUB) {
		if (dsl_errorscrub_is_paused(dp->dp_scan)) {
			/*
			 * got error scrub start cmd, resume paused error scrub.
			 */
			int err = dsl_scrub_set_pause_resume(scn->scn_dp,
			    POOL_SCRUB_NORMAL);
			if (err == 0) {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_ERRORSCRUB_RESUME);
				return (ECANCELED);
			}
			return (SET_ERROR(err));
		}

		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
		    &func, 0, ZFS_SPACE_CHECK_RESERVED));
	}

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (SET_ERROR(ECANCELED));
		}
		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	if (complete) {
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
		spa_history_log_internal(spa, "error scrub done", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	} else {
		spa_history_log_internal(spa, "error scrub canceled", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	}

	scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
	spa->spa_scrub_active = B_FALSE;
	spa_errlog_rotate(spa);
	scn->errorscrub_phys.dep_end_time = gethrestime_sec();
	zap_cursor_fini(&scn->errorscrub_cursor);

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_errorscrubbing(scn->scn_dp));
}
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);
	scan_ds_prefetch_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	spa_notify_waiters(spa);

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);

			if (scn->scn_phys.scn_min_txg) {
				nvlist_t *aux = fnvlist_alloc();
				fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
				    "healing");
				spa_event_notify(spa, NULL, aux,
				    ESC_ZFS_RESILVER_FINISH);
				nvlist_free(aux);
			} else {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_SCRUB_FINISH);
			}
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE, B_FALSE);
		}
		spa_errlog_rotate(spa);

		/*
		 * Don't clear flag until after vdev_dtl_reassess to ensure that
		 * DTL_MISSING will get updated when possible.
		 */
		spa->spa_scrub_started = B_FALSE;

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

		/*
		 * Clear any resilver_deferred flags in the config.
		 * If there are drives that need resilvering, kick
		 * off an asynchronous request to start resilver.
		 * vdev_clear_resilver_deferred() may update the config
		 * before the resilver can restart. In the event of
		 * a crash during this period, the spa loading code
		 * will find the drives that need to be resilvered
		 * and start the resilver then.
		 */
		if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
		    vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
			spa_history_log_internal(spa,
			    "starting deferred resilver", tx, "errors=%llu",
			    (u_longlong_t)spa_approx_errlog_size(spa));
			spa_async_request(spa, SPA_ASYNC_RESILVER);
		}

		/* Clear recent error events (i.e. duplicate events tracking) */
		if (complete)
			zfs_ereport_clear(spa, NULL);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_scan_is_running(scn));
}
static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/*
		 * can't pause a error scrub when there is no in-progress
		 * error scrub.
		 */
		if (!dsl_errorscrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused error scrub */
		if (dsl_errorscrub_is_paused(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}
static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
		scn->errorscrub_phys.dep_paused_flags = B_TRUE;
		dsl_errorscrub_sync_state(scn, tx);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_errorscrub_is_paused(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the error scrub
			 * rate shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_errorscrub_spent_paused +=
			    gethrestime_sec() -
			    spa->spa_scan_pass_errorscrub_pause;

			spa->spa_scan_pass_errorscrub_pause = 0;
			scn->errorscrub_phys.dep_paused_flags = B_FALSE;

			zap_cursor_init_serialized(
			    &scn->errorscrub_cursor,
			    spa->spa_meta_objset, spa->spa_errlog_last,
			    scn->errorscrub_phys.dep_cursor);

			dsl_errorscrub_sync_state(scn, tx);
		}
	}
}
static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	/* can't cancel a error scrub when there is no one in-progress */
	if (!dsl_errorscrubbing(scn->scn_dp))
		return (SET_ERROR(ENOENT));
	return (0);
}
static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_errorscrub_done(scn, B_FALSE, tx);
	dsl_errorscrub_sync_state(scn, tx);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
	    ESC_ZFS_ERRORSCRUB_ABORT);
}
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
	if (dsl_errorscrubbing(dp)) {
		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
		    NULL, 3, ZFS_SPACE_CHECK_RESERVED));
	}
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
		spa_notify_waiters(spa);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}
/*
 * Set scrub pause/resume state if it makes sense to do so
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	if (dsl_errorscrubbing(dp)) {
		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_pause_resume_check,
		    dsl_errorscrub_pause_resume_sync, &cmd, 3,
		    ZFS_SPACE_CHECK_RESERVED));
	}
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}
/* start a new scan, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver for %s at txg=%llu",
	    dp->dp_spa->spa_name, (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;
	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_queues_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}
/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
 */
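/*
 * Worked example (illustrative numbers only): with 16 GiB of physmem and
 * the defaults above, the hard limit is
 * MAX(physmem / zfs_scan_mem_lim_fact, zfs_scan_mem_lim_min) =
 * MAX(16 GiB / 20, 16 MiB) ~= 819 MiB (further capped at 5% of the pool's
 * allocated space), and the soft limit is
 * 819 MiB - MIN(819 MiB / zfs_scan_mem_lim_soft_fact, 128 MiB) =
 * 819 MiB - 41 MiB ~= 778 MiB.
 */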
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t alloc, mlim_hard, mlim_soft, mused;

	alloc = metaslab_class_get_alloc(spa_normal_class(spa));
	alloc += metaslab_class_get_alloc(spa_special_class(spa));
	alloc += metaslab_class_get_alloc(spa_dedup_class(spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/*
			 * # of extents in exts_by_addr = # in exts_by_size.
			 * B-tree efficiency is ~75%, but can be as low as 50%.
			 */
			mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
			    ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
			    3 / 2) + queue->q_sio_memused;
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_queues_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 and objset blocks. */
	if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	uint64_t dirty_min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
			dprintf("suspending at first available bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    zb->zb_objset, 0, 0, 0);
		} else if (zb != NULL) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
			dsl_scan_phys_t *scnp = &scn->scn_phys;
			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		}

		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}
static boolean_t
dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/*
	 * We suspend if:
	 *  - we have scrubbed for at least the minimum time (default 1 sec
	 *    for error scrub), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int mintime = zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb != NULL) {
			dprintf("error scrub suspending at bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
		}
		return (B_TRUE);
	}
	return (B_FALSE);
}
typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	(void) zilog;
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	ASSERT(!BP_IS_REDACTED(bp));
	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	(void) zilog;
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		const lr_write_t *lr = (const lr_write_t *)lrc;
		const blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		ASSERT(!BP_IS_REDACTED(bp));
		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	ASSERT(spa_writeable(dp->dp_spa));

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0)
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg, B_FALSE);

	zil_free(zilog);
}

/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;

	return (zbookmark_compare(spc_a->spc_datablkszsec,
	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
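
/*
 * Because zbookmark_compare() orders bookmarks in traversal order,
 * avl_first() on scn_prefetch_queue always yields the block the main
 * traversal thread will need soonest; e.g. a bookmark in objset 5,
 * object 3 sorts ahead of one in objset 5, object 7 regardless of the
 * order in which they were queued.
 */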

static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
{
	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
		zfs_refcount_destroy(&spc->spc_refcnt);
		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
	}
}

static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
{
	scan_prefetch_ctx_t *spc;

	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
	zfs_refcount_create(&spc->spc_refcnt);
	zfs_refcount_add(&spc->spc_refcnt, tag);
	spc->spc_scn = scn;
	if (dnp != NULL) {
		spc->spc_datablkszsec = dnp->dn_datablkszsec;
		spc->spc_indblkshift = dnp->dn_indblkshift;
		spc->spc_root = B_FALSE;
	} else {
		spc->spc_datablkszsec = 0;
		spc->spc_indblkshift = 0;
		spc->spc_root = B_TRUE;
	}

	return (spc);
}
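
/*
 * Reference lifecycle sketch for a prefetch context: the creator holds
 * one reference (tag), dsl_scan_prefetch() takes another for each
 * queued scan_prefetch_issue_ctx_t, and the context is freed by the
 * final scan_prefetch_ctx_rele() once the creator and every queued or
 * completed prefetch have dropped theirs.
 */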

static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
{
	zfs_refcount_add(&spc->spc_refcnt, tag);
}

static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	void *cookie = NULL;
	scan_prefetch_issue_ctx_t *spic = NULL;

	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
	    &cookie)) != NULL) {
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
    const zbookmark_phys_t *zb)
{
	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
	dnode_phys_t tmp_dnp;
	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;

	if (zb->zb_objset != last_zb->zb_objset)
		return (B_TRUE);
	if ((int64_t)zb->zb_object < 0)
		return (B_FALSE);

	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;

	if (zbookmark_subtree_completed(dnp, zb, last_zb))
		return (B_TRUE);

	return (B_FALSE);
}

static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
	avl_index_t idx;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
		return;

	if (dsl_scan_check_prefetch_resume(spc, zb))
		return;

	scan_prefetch_ctx_add_ref(spc, scn);
	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
	spic->spic_spc = spc;
	spic->spic_bp = *bp;
	spic->spic_zb = *zb;

	/*
	 * Add the IO to the queue of blocks to prefetch. This allows us to
	 * prioritize blocks that we will need first for the main traversal
	 * thread.
	 */
	mutex_enter(&spa->spa_scrub_lock);
	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
		/* this block is already queued for prefetch */
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
		scan_prefetch_ctx_rele(spc, scn);
		mutex_exit(&spa->spa_scrub_lock);
		return;
	}

	avl_insert(&scn->scn_prefetch_queue, spic, idx);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int i;
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
		return;

	SET_BOOKMARK(&zb, objset, object, 0, 0);

	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);

	for (i = 0; i < dnp->dn_nblkptr; i++) {
		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
		zb.zb_blkid = i;
		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zb.zb_level = 0;
		zb.zb_blkid = DMU_SPILL_BLKID;
		dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
	}

	scan_prefetch_ctx_rele(spc, FTAG);
}

static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *private)
{
	(void) zio;
	scan_prefetch_ctx_t *spc = private;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;

	/* broadcast that the IO has completed for rate limiting purposes */
	mutex_enter(&spa->spa_scrub_lock);
	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);

	/* if there was an error or we are done prefetching, just cleanup */
	if (buf == NULL || scn->scn_prefetch_stop)
		goto out;

	if (BP_GET_LEVEL(bp) > 0) {
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t czb;

		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1, zb->zb_blkid * epb + i);
			dsl_scan_prefetch(spc, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_prefetch_dnode(scn, cdnp,
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		objset_phys_t *osp = buf->b_data;

		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
		    zb->zb_objset, DMU_META_DNODE_OBJECT);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_groupused_dnode, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_userused_dnode, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

out:
	if (buf != NULL)
		arc_buf_destroy(buf, private);
	scan_prefetch_ctx_rele(spc, scn);
}
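
/*
 * The recursion above mirrors dsl_scan_recurse(): indirect blocks feed
 * their children back into the prefetch queue, dnode blocks prefetch
 * each contained dnode's block pointers, and objset blocks prefetch
 * the meta dnode plus the user/group accounting dnodes.  Level-0 data
 * blocks never reach this callback because dsl_scan_prefetch() filters
 * them out up front.
 */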

static void
dsl_scan_prefetch_thread(void *arg)
{
	dsl_scan_t *scn = arg;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	/* loop until we are told to stop */
	while (!scn->scn_prefetch_stop) {
		arc_flags_t flags = ARC_FLAG_NOWAIT |
		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;

		mutex_enter(&spa->spa_scrub_lock);

		/*
		 * Wait until we have an IO to issue and are not above our
		 * maximum in flight limit.
		 */
		while (!scn->scn_prefetch_stop &&
		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		}

		/* recheck if we should stop since we waited for the cv */
		if (scn->scn_prefetch_stop) {
			mutex_exit(&spa->spa_scrub_lock);
			break;
		}

		/* remove the prefetch IO from the tree */
		spic = avl_first(&scn->scn_prefetch_queue);
		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
		avl_remove(&scn->scn_prefetch_queue, spic);

		mutex_exit(&spa->spa_scrub_lock);

		if (BP_IS_PROTECTED(&spic->spic_bp)) {
			ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
			    BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
			ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
			zio_flags |= ZIO_FLAG_RAW;
		}

		/* issue the prefetch asynchronously */
		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);

		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}

	ASSERT(scn->scn_prefetch_stop);

	/* free any prefetches we didn't get to complete */
	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
		avl_remove(&scn->scn_prefetch_queue, spic);
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
	mutex_exit(&spa->spa_scrub_lock);
}
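
/*
 * The prefetch thread and the traversal code form a producer/consumer
 * pair: dsl_scan_prefetch() produces bookmark-ordered entries, and this
 * thread consumes them, throttled so spa_scrub_inflight stays below
 * scn_maxinflight_bytes.  The cv_wait()/cv_broadcast() pairs on
 * spa_scrub_io_cv provide both the wakeup and the rate limiting.
 */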

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it, zero it out to indicate that it's OK
		 * to start checking for suspending again.
		 */
		if (zbookmark_subtree_tbd(dnp, zb,
		    &scn->scn_phys.scn_bookmark)) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

/*
 * Return nonzero on i/o error.
 * Return new buf to write out in *bufp.
 */
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	ASSERT(!BP_IS_REDACTED(bp));

	/*
	 * There is an unlikely case of encountering dnodes with contradicting
	 * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag before in files created
	 * or modified before commit 4254acb was merged. As it is not possible
	 * to know which of the two is correct, report an error.
	 */
	if (dnp != NULL &&
	    dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
		scn->scn_phys.scn_errors++;
		spa_log_error(spa, zb, &bp->blk_birth);
		return (SET_ERROR(EINVAL));
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		if (BP_IS_PROTECTED(bp)) {
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			zio_flags |= ZIO_FLAG_RAW;
		}

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group/project accounting
			 * objects, and never skip them, even if we are
			 * suspending. This is necessary so that the
			 * space deltas from this txg get integrated.
			 */
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				dsl_scan_visitdnode(scn, ds, osp->os_type,
				    &osp->os_projectused_dnode,
				    DMU_PROJECTUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (!zfs_blkptr_verify(spa, bp,
	    BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
		/*
		 * Sanity check the block pointer contents, this is handled
		 * by arc_read() for the cases above.
		 */
		scn->scn_phys.scn_errors++;
		spa_log_error(spa, zb, &bp->blk_birth);
		return (SET_ERROR(EINVAL));
	}

	return (0);
}
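
/*
 * A sizing example for the epb ("entries per block") loops above: a
 * 128K indirect block holds 128K >> SPA_BLKPTRSHIFT = 1024 block
 * pointers, and a 16K dnode block holds 16K >> DNODE_SHIFT = 32
 * 512-byte dnode slots (fewer logical dnodes when dn_extra_slots
 * indicates large dnodes).
 */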

inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	blkptr_t *bp_toread = NULL;

	if (dsl_scan_check_suspend(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	scn->scn_visited_this_txg++;

	if (BP_IS_HOLE(bp)) {
		scn->scn_holes_this_txg++;
		return;
	}

	if (BP_IS_REDACTED(bp)) {
		ASSERT(dsl_dataset_feature_is_active(ds,
		    SPA_FEATURE_REDACTED_DATASETS));
		return;
	}

	/*
	 * Check if this block contradicts any filesystem flags.
	 */
	spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
	if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
		ASSERT(dsl_dataset_feature_is_active(ds, f));

	f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
	if (f != SPA_FEATURE_NONE)
		ASSERT(dsl_dataset_feature_is_active(ds, f));

	f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
	if (f != SPA_FEATURE_NONE)
		ASSERT(dsl_dataset_feature_is_active(ds, f));

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
		scn->scn_lt_min_this_txg++;
		return;
	}

	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	*bp_toread = *bp;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
		goto out;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		scn->scn_ddt_contained_this_txg++;
		goto out;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
		scn->scn_gt_max_this_txg++;
		goto out;
	}

	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);

out:
	kmem_free(bp_toread, sizeof (blkptr_t));
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
		    zb.zb_objset, 0, 0, 0);
	} else {
		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
	}

	scn->scn_objsets_visited_this_txg++;

	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
	dsl_scan_prefetch(spc, bp, &zb);
	scan_prefetch_ctx_rele(spc, FTAG);

	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about.  However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn_phys->scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu on %s; currently "
			    "traversing; reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    ds->ds_dir->dd_pool->dp_spa->spa_name,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn_phys->scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu on %s; currently "
			    "traversing; reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object,
			    ds->ds_dir->dd_pool->dp_spa->spa_name);
		}
	}
}

/*
 * Invoked when a dataset is destroyed. We need to make sure that:
 *
 * 1) If it is the dataset that was currently being scanned, we write
 *	a new dsl_scan_phys_t and marking the objset reference in it
 *	as destroyed.
 * 2) Remove it from the work queue, if it was present.
 *
 * If the dataset was actually a snapshot, instead of marking the dataset
 * as destroyed, we instead substitute the next snapshot in line.
 */
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_destroyed_scn_phys(ds, &scn->scn_phys);
	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		if (ds->ds_is_snapshot)
			scan_ds_queue_insert(scn,
			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu on %s; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    dp->dp_spa->spa_name,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu on %s; in queue; "
			    "removing",
			    (u_longlong_t)ds->ds_object,
			    dp->dp_spa->spa_name);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds->ds_object) {
		scn_bookmark->zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    ds->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
}

/*
 * Called when a dataset is snapshotted. If we were currently traversing
 * this snapshot, we reset our bookmark to point at the newly created
 * snapshot. We also modify our work queue to remove the old snapshot and
 * replace with the new one.
 */
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
    zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds1->ds_object) {
		scn_bookmark->zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    ds1->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
		scn_bookmark->zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    ds2->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}
}

/*
 * Called when an origin dataset and its clone are swapped. If we were
 * currently traversing the dataset, we need to switch to traversing the
 * newly promoted clone.
 */
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg1, mintxg2;
	boolean_t ds1_queued, ds2_queued;

	if (!dsl_scan_is_running(scn))
		return;

	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);

	/*
	 * Handle the in-memory scan queue.
	 */
	ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
	ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * The swapping code below would not handle this case correctly,
		 * since we can't insert ds2 if it is already there. That's
		 * because scan_ds_queue_insert() prohibits a duplicate insert
		 * and panics in that case.
		 */
	} else if (ds1_queued) {
		scan_ds_queue_remove(scn, ds1->ds_object);
		scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
	} else if (ds2_queued) {
		scan_ds_queue_remove(scn, ds2->ds_object);
		scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
	}

	/*
	 * Handle the on-disk scan queue.
	 * The on-disk state is an out-of-date version of the in-memory state,
	 * so the in-memory and on-disk values for ds1_queued and ds2_queued may
	 * be different. Therefore we need to apply the swap logic to the
	 * on-disk state independently of the in-memory state.
	 */
	ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
	ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * Alternatively, we could check for EEXIST from
		 * zap_add_int_key() and back out to the original state, but
		 * that would be more work than checking for this case upfront.
		 */
	} else if (ds1_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (ds2_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	uint64_t originobj = *(uint64_t *)arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started.  This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *	cur_min_txg is no less than ds_creation_txg.
		 *	We have already visited these blocks.
		 * or
		 *	birth > scn_max_txg
		 *	The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots.  This happens when min_txg > 0,
		 * which raises cur_min_txg.  In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg.  Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, MAXNAMELEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have a objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (!dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass on %s; visiting again",
		    dp->dp_spa->spa_name);
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    (void) zap_cursor_advance(&zc)) {
				scan_ds_queue_insert(scn,
				    zfs_strtonum(za.za_name, NULL),
				    dsl_dataset_phys(ds)->ds_creation_txg);
			}
			zap_cursor_fini(&zc);
		} else {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &ds->ds_object,
			    DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	(void) arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	(void) tx;
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (!dsl_scan_is_running(scn))
		return;

	/*
	 * This function is special because it is the only thing
	 * that can add scan_io_t's to the vdev scan queues from
	 * outside dsl_scan_sync(). For the most part this is ok
	 * as long as it is called from within syncing context.
	 * However, dsl_scan_sync() expects that no new sio's will
	 * be added between when all the work for a scan is done
	 * and the next txg when the scan is actually marked as
	 * completed. This check ensures we do not issue new sio's
	 * during this period.
	 */
	if (scn->scn_done_txg != 0)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
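
/*
 * Concretely: a block with refcnt == 3 lives in DDT_CLASS_DUPLICATE,
 * gets scrubbed once while dsl_scan_ddt() walks that class, and is
 * afterwards skipped by the ddt_class_contains() check in
 * dsl_scan_visitbp() during the top-down phase.
 */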
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = {{{{0}}}};
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
	    "suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scan_ds_t *sds;
	dsl_pool_t *dp = scn->scn_dp;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_suspending)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_suspending)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, NULL, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_suspending);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
		/*
		 * If we were suspended, continue from here. Note if the
		 * ds we were suspended on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/*
	 * In case we suspended right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));

	/*
	 * Keep pulling things out of the dataset avl queue. Updates to the
	 * persistent zap-object-as-queue happen only at checkpoints.
	 */
	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
		dsl_dataset_t *ds;
		uint64_t dsobj = sds->sds_dsobj;
		uint64_t txg = sds->sds_txg;

		/* dequeue and free the ds from the queue */
		scan_ds_queue_remove(scn, dsobj);
		sds = NULL;

		/* set up min / max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (txg != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg, txg);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/* No more objsets to fetch, we're done */
	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
	ASSERT0(scn->scn_suspending);
}

static uint64_t
dsl_scan_count_data_disks(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t i, leaves = 0;

	for (i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];
		if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
			continue;
		leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
	}
	return (leaves);
}

static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
	int i;
	uint64_t cur_size = 0;

	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
	}

	q->q_total_zio_size_this_txg += cur_size;
	q->q_zios_this_txg++;
}

static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
    uint64_t end)
{
	q->q_total_seg_size_this_txg += end - start;
	q->q_segs_this_txg++;
}

static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
	/* See comment in dsl_scan_check_suspend() */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	uint64_t dirty_min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	return ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

/*
 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
 * disk. This consumes the io_list and frees the scan_io_t's. This is
 * called when emptying queues, either when we're up against the memory
 * limit or when we have finished scanning. Returns B_TRUE if we stopped
 * processing the list before we finished. Any sios that were not issued
 * will remain in the io_list.
 */
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	boolean_t suspended = B_FALSE;

	while ((sio = list_head(io_list)) != NULL) {
		blkptr_t bp;

		if (scan_io_queue_check_suspend(scn)) {
			suspended = B_TRUE;
			break;
		}

		sio2bp(sio, &bp);
		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
		    &sio->sio_zb, queue);
		(void) list_remove_head(io_list);
		scan_io_queues_update_zio_stats(queue, &bp);
	}
	return (suspended);
}

/*
 * This function removes sios from an IO queue which reside within a given
 * range_seg_t and inserts them (in offset order) into a list. Note that
 * we only ever return a maximum of 32 sios at once. If there are more sios
 * to process within this segment that did not make it onto the list we
 * return B_TRUE and otherwise B_FALSE.
 */
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
	scan_io_t *srch_sio, *sio, *next_sio;
	avl_index_t idx;
	uint_t num_sios = 0;
	int64_t bytes_issued = 0;

	ASSERT(rs != NULL);
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	srch_sio = sio_alloc(1);
	srch_sio->sio_nr_dvas = 1;
	SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));

	/*
	 * The exact start of the extent might not contain any matching zios,
	 * so if that's the case, examine the next one in the tree.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio == NULL)
		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);

	while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr) && num_sios <= 32) {
		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
		    queue->q_exts_by_addr));
		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
		    queue->q_exts_by_addr));

		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
		avl_remove(&queue->q_sios_by_addr, sio);
		if (avl_is_empty(&queue->q_sios_by_addr))
			atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		bytes_issued += SIO_GET_ASIZE(sio);
		num_sios++;
		list_insert_tail(list, sio);
		sio = next_sio;
	}

	/*
	 * We limit the number of sios we process at once to 32 to avoid
	 * biting off more than we can chew. If we didn't take everything
	 * in the segment we update it to reflect the work we were able to
	 * complete. Otherwise, we remove it from the range tree entirely.
	 */
	if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr)) {
		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
		    -bytes_issued);
		range_tree_resize_segment(queue->q_exts_by_addr, rs,
		    SIO_GET_OFFSET(sio), rs_get_end(rs,
		    queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
		queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
		return (B_TRUE);
	} else {
		uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
		uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
		range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
		queue->q_last_ext_addr = -1;
		return (B_FALSE);
	}
}
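
/*
 * Worked example, with hypothetical offsets: if an extent spans sios
 * covering [0, 40) but the 32-sio batch only reaches offset 24, the
 * extent is resized to [24, 40), its fill is reduced by the bytes just
 * gathered, and q_last_ext_addr remembers offset 24 so that
 * scan_io_queue_fetch_ext() can resume this same extent next time.
 */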

/*
 * This is called from the queue emptying thread and selects the next
 * extent from which we are to issue I/Os. The behavior of this function
 * depends on the state of the scan, the current memory consumption and
 * whether or not we are performing a scan shutdown.
 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
 *	needs to perform a checkpoint
 * 2) We select the largest available extent if we are up against the
 *	memory limit.
 * 3) Otherwise we don't select any extents.
 */
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	range_tree_t *rt = queue->q_exts_by_addr;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
	ASSERT(scn->scn_is_sorted);

	if (!scn->scn_checkpointing && !scn->scn_clearing)
		return (NULL);

	/*
	 * During normal clearing, we want to issue our largest segments
	 * first, keeping IO as sequential as possible, and leaving the
	 * smaller extents for later with the hope that they might eventually
	 * grow to larger sequential segments. However, when the scan is
	 * checkpointing, no new extents will be added to the sorting queue,
	 * so the way we are sorted now is as good as it will ever get.
	 * In this case, we instead switch to issuing extents in LBA order.
	 */
	if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
	    zfs_scan_issue_strategy == 1)
		return (range_tree_first(rt));

	/*
	 * Try to continue previous extent if it is not completed yet.  After
	 * shrink in scan_io_queue_gather() it may no longer be the best, but
	 * otherwise we leave shorter remnant every txg.
	 */
	uint64_t start;
	uint64_t size = 1ULL << rt->rt_shift;
	range_seg_t *addr_rs;
	if (queue->q_last_ext_addr != -1) {
		start = queue->q_last_ext_addr;
		addr_rs = range_tree_find(rt, start, size);
		if (addr_rs != NULL)
			return (addr_rs);
	}

	/*
	 * Nothing to continue, so find new best extent.
	 */
	uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
	if (v == NULL)
		return (NULL);
	queue->q_last_ext_addr = start = *v << rt->rt_shift;

	/*
	 * We need to get the original entry in the by_addr tree so we can
	 * modify it.
	 */
	addr_rs = range_tree_find(rt, start, size);
	ASSERT3P(addr_rs, !=, NULL);
	ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
	ASSERT3U(rs_get_end(addr_rs, rt), >, start);
	return (addr_rs);
}
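
/*
 * The zfs_scan_issue_strategy tunable steers the choice above: with
 * the default (0), extents issue largest-first and fall back to LBA
 * order only while checkpointing; 1 forces LBA order always; any other
 * value keeps size-ordered selection even during a checkpoint.
 */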

static void
scan_io_queues_run_one(void *arg)
{
	dsl_scan_io_queue_t *queue = arg;
	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
	boolean_t suspended = B_FALSE;
	range_seg_t *rs;
	scan_io_t *sio;
	zio_t *zio;
	list_t sio_list;

	ASSERT(queue->q_scn->scn_is_sorted);

	list_create(&sio_list, sizeof (scan_io_t),
	    offsetof(scan_io_t, sio_nodes.sio_list_node));
	zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
	    NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
	mutex_enter(q_lock);
	queue->q_zio = zio;

	/* Calculate maximum in-flight bytes for this vdev. */
	queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
	    (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));

	/* reset per-queue scan statistics for this txg */
	queue->q_total_seg_size_this_txg = 0;
	queue->q_segs_this_txg = 0;
	queue->q_total_zio_size_this_txg = 0;
	queue->q_zios_this_txg = 0;

	/* loop until we run out of time or sios */
	while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
		uint64_t seg_start = 0, seg_end = 0;
		boolean_t more_left;

		ASSERT(list_is_empty(&sio_list));

		/* loop while we still have sios left to process in this rs */
		do {
			scan_io_t *first_sio, *last_sio;

			/*
			 * We have selected which extent needs to be
			 * processed next. Gather up the corresponding sios.
			 */
			more_left = scan_io_queue_gather(queue, rs, &sio_list);
			ASSERT(!list_is_empty(&sio_list));
			first_sio = list_head(&sio_list);
			last_sio = list_tail(&sio_list);

			seg_end = SIO_GET_END_OFFSET(last_sio);
			if (seg_start == 0)
				seg_start = SIO_GET_OFFSET(first_sio);

			/*
			 * Issuing sios can take a long time so drop the
			 * queue lock. The sio queue won't be updated by
			 * other threads since we're in syncing context so
			 * we can be sure that our trees will remain exactly
			 * as we left them.
			 */
			mutex_exit(q_lock);
			suspended = scan_io_queue_issue(queue, &sio_list);
			mutex_enter(q_lock);

			if (suspended)
				break;
		} while (more_left);

		/* update statistics for debugging purposes */
		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);

		if (suspended)
			break;
	}

	/*
	 * If we were suspended in the middle of processing,
	 * requeue any unfinished sios and exit.
	 */
	while ((sio = list_head(&sio_list)) != NULL) {
		list_remove(&sio_list, sio);
		scan_io_queue_insert_impl(queue, sio);
	}

	queue->q_zio = NULL;
	mutex_exit(q_lock);
	zio_nowait(zio);
	list_destroy(&sio_list);
}

/*
 * Performs an emptying run on all scan queues in the pool. This just
 * punches out one thread per top-level vdev, each of which processes
 * only that vdev's scan queue. We can parallelize the I/O here because
 * we know that each queue's I/Os only affect its own top-level vdev.
 *
 * This function waits for the queue runs to complete, and must be
 * called from dsl_scan_sync (or in general, syncing context).
 */
static void
scan_io_queues_run(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(scn->scn_is_sorted);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (scn->scn_queues_pending == 0)
		return;

	if (scn->scn_taskq == NULL) {
		int nthreads = spa->spa_root_vdev->vdev_children;

		/*
		 * We need to make this taskq *always* execute as many
		 * threads in parallel as we have top-level vdevs and no
		 * less, otherwise strange serialization of the calls to
		 * scan_io_queues_run_one can occur during spa_sync runs
		 * and that significantly impacts performance.
		 */
		scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
		    minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];

		mutex_enter(&vd->vdev_scan_io_queue_lock);
		if (vd->vdev_scan_io_queue != NULL) {
			VERIFY(taskq_dispatch(scn->scn_taskq,
			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
			    TQ_SLEEP) != TASKQID_INVALID);
		}
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * Wait for the queues to finish issuing their IOs for this run
	 * before we return. There may still be IOs in flight at this
	 * point.
	 */
	taskq_wait(scn->scn_taskq);
}

static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (zfs_async_block_max_blocks != 0 &&
	    scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
		return (B_TRUE);
	}

	if (zfs_max_async_dedup_frees != 0 &&
	    scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
		return (B_TRUE);
	}

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_async_block_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	if (BP_GET_DEDUP(bp))
		scn->scn_dedup_frees_this_txg++;
	return (0);
}

static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t i;
	uint64_t seg_size_total = 0, zio_size_total = 0;
	uint64_t seg_count_total = 0, zio_count_total = 0;

	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;

		if (queue == NULL)
			continue;

		seg_size_total += queue->q_total_seg_size_this_txg;
		zio_size_total += queue->q_total_zio_size_this_txg;
		seg_count_total += queue->q_segs_this_txg;
		zio_count_total += queue->q_zios_this_txg;
	}

	if (seg_count_total == 0 || zio_count_total == 0) {
		scn->scn_avg_seg_size_this_txg = 0;
		scn->scn_avg_zio_size_this_txg = 0;
		scn->scn_segs_this_txg = 0;
		scn->scn_zios_this_txg = 0;
		return;
	}

	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
	scn->scn_segs_this_txg = seg_count_total;
	scn->scn_zios_this_txg = zio_count_total;
}

static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	return (dsl_scan_free_block_cb(arg, bp, tx));
}

static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	dsl_scan_t *scn = arg;
	const dva_t *dva = &bp->blk_dva[0];

	if (dsl_scan_async_block_should_pause(scn))
		return (SET_ERROR(ERESTART));

	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
	    DVA_GET_ASIZE(dva), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;
	boolean_t clones_left;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	clones_left = spa_livelist_delete_check(spa);
	return ((used != 0) || (clones_left));
}

boolean_t
dsl_errorscrub_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (dsl_errorscrubbing(scn->scn_dp))
		return (B_TRUE);
	return (B_FALSE);
}

static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
	boolean_t need_resilver = B_FALSE;

	for (int c = 0; c < vd->vdev_children; c++) {
		need_resilver |=
		    dsl_scan_check_deferred(vd->vdev_child[c]);
	}

	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
	    !vd->vdev_ops->vdev_op_leaf)
		return (need_resilver);

	if (!vd->vdev_resilver_deferred)
		need_resilver = B_TRUE;

	return (need_resilver);
}

static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	vdev_t *vd;

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops == &vdev_indirect_ops) {
		/*
		 * The indirect vdev can point to multiple
		 * vdevs.  For simplicity, always create
		 * the resilver zio_t. zio_vdev_io_start()
		 * will bypass the child resilver i/o's if
		 * they are on vdevs that don't have DTL's.
		 */
		return (B_TRUE);
	}

	if (DVA_GET_GANG(dva)) {
		/*
		 * Gang members may be spread across multiple
		 * vdevs, so the best estimate we have is the
		 * scrub range, which has already been checked.
		 * XXX -- it would be better to change our
		 * allocation policy to ensure that all
		 * gang members reside on the same vdev.
		 */
		return (B_TRUE);
	}

	/*
	 * Check if the top-level vdev must resilver this offset.
	 * When the offset does not intersect with a dirty leaf DTL
	 * then it may be possible to skip the resilver IO.  The psize
	 * is provided instead of asize to simplify the check for RAIDZ.
	 */
	if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
		return (B_FALSE);

	/*
	 * Check that this top-level vdev has a device under it which
	 * is resilvering and is not deferred.
	 */
	if (!dsl_scan_check_deferred(vd))
		return (B_FALSE);

	return (B_TRUE);
}
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	if (spa_suspend_async_destroy(spa))
		return (0);

	if (zfs_free_bpobj_enabled &&
	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    bpobj_dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf.  Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree on %s in txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    spa->spa_name, (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;
		scn->scn_dedup_frees_this_txg = 0;

		/*
		 * Write out changes to the DDT and the BRT that may be required
		 * as a result of the blocks freed.  This ensures that the DDT
		 * and the BRT are clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
		brt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return (err);
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    !spa_livelist_delete_check(spa)) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);

		spa_notify_waiters(spa);
	}

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}
	return (0);
}
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}
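/*
 * Illustrative sketch (excluded from the build): the ZAP names parsed by
 * name_to_bookmark() above are colon-separated integers of the form
 * "<objset>:<object>:<level>:<blkid>". A userland equivalent using
 * strtoull(3) could look like the hypothetical helper below; the struct
 * is a stand-in for zbookmark_phys_t, not the real type.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct example_bookmark {
	uint64_t eb_objset;
	uint64_t eb_object;
	int64_t eb_level;
	uint64_t eb_blkid;
};

static void
example_name_to_bookmark(const char *buf, struct example_bookmark *eb)
{
	char *end;

	eb->eb_objset = strtoull(buf, &end, 10);	/* stops at ':' */
	eb->eb_object = strtoull(end + 1, &end, 10);
	eb->eb_level = (int64_t)strtoull(end + 1, &end, 10);
	eb->eb_blkid = strtoull(end + 1, &end, 10);	/* *end == '\0' */
}
#endif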
static void
read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;
	if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
		return;

	if (dmu_objset_from_ds(ds, &os) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	/*
	 * If the key is not loaded dbuf_dnode_findbp() will error out with
	 * EACCES. However in that case dnode_hold() will eventually call
	 * dbuf_read()->zio_wait() which may call spa_log_error(). This will
	 * lead to a deadlock due to us holding the mutex spa_errlist_lock.
	 * Avoid this by checking here if the keys are loaded, if not return.
	 * If the keys are not loaded the head_errlog feature is meaningless
	 * as we cannot figure out the birth txg of the block pointer.
	 */
	if (dsl_dataset_get_keystatus(ds->ds_dir) ==
	    ZFS_KEYSTATUS_UNAVAILABLE) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	dnode_t *dn;
	blkptr_t bp;

	if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
	    NULL);
	if (error) {
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	if (!error && BP_IS_HOLE(&bp)) {
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;

	/* If it's an intent log block, failure is expected. */
	if (zb.zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	ASSERT(!BP_IS_EMBEDDED(&bp));
	scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	dsl_dataset_rele(ds, FTAG);
}
/*
 * We keep track of the scrubbed error blocks in "count". This will be used
 * when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This
 * function is modelled after check_filesystem().
 */
static int
scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep,
    int *count)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;

	error = find_birth_txg(ds, zep, &latest_txg);

	/*
	 * If find_birth_txg() errors out, then err on the side of caution and
	 * proceed. In worst case scenario scrub all objects. If zep->zb_birth
	 * is 0 (e.g. in case of encryption with unloaded keys) also proceed to
	 * scrub all objects.
	 */
	if (error == 0 && zep->zb_birth == latest_txg) {
		/* Block neither freed nor rewritten. */
		zbookmark_phys_t zb;
		zep_to_zb(fs, zep, &zb);
		scn->scn_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
		/* We have already acquired the config lock for spa */
		read_by_block_level(scn, zb);

		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		scn->errorscrub_phys.dep_examined++;
		scn->errorscrub_phys.dep_to_examine--;
		(*count)++;
		if ((*count) == zfs_scrub_error_blocks_per_txg ||
		    dsl_error_scrub_check_suspend(scn, &zb)) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EFAULT));
		}

		check_snapshot = B_FALSE;
	} else if (error == 0) {
		txg_to_consider = latest_txg;
	}

	/*
	 * Retrieve the number of snapshots if the dataset is not a snapshot.
	 */
	uint64_t snap_count = 0;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {

		error = zap_count(spa->spa_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);

		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
	}

	if (snap_count == 0) {
		/* Filesystem without snapshots. */
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	dsl_dataset_rele(ds, FTAG);

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
		if (error != 0)
			return (error);

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
			snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
			snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);

			/*
			 * Scrub the snapshot also when zb_birth == 0 or when
			 * find_birth_txg() returns an error.
			 */
			affected = (error == 0 && zep->zb_birth == blk_txg) ||
			    (error != 0) || (zep->zb_birth == 0);
		}

		/* Scrub snapshots. */
		if (affected) {
			zbookmark_phys_t zb;
			zep_to_zb(snap_obj, zep, &zb);
			scn->scn_zio_root = zio_root(spa, NULL, NULL,
			    ZIO_FLAG_CANFAIL);
			/* We have already acquired the config lock for spa */
			read_by_block_level(scn, zb);

			(void) zio_wait(scn->scn_zio_root);
			scn->scn_zio_root = NULL;

			scn->errorscrub_phys.dep_examined++;
			scn->errorscrub_phys.dep_to_examine--;
			(*count)++;
			if ((*count) == zfs_scrub_error_blocks_per_txg ||
			    dsl_error_scrub_check_suspend(scn, &zb)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EFAULT));
			}
		}
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	return (0);
}
void
dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
		return;
	}

	if (dsl_scan_resilvering(scn->scn_dp)) {
		/* cancel the error scrub if resilver started */
		dsl_scan_cancel(scn->scn_dp);
		return;
	}

	spa->spa_scrub_active = B_TRUE;
	scn->scn_sync_start_time = gethrtime();

	/*
	 * zfs_scan_suspend_progress can be set to disable scrub progress.
	 * See more detailed comment in dsl_scan_sync().
	 */
	if (zfs_scan_suspend_progress) {
		uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		int mintime = zfs_scrub_min_time_ms;

		while (zfs_scan_suspend_progress &&
		    !txg_sync_waiting(scn->scn_dp) &&
		    !spa_shutting_down(scn->scn_dp->dp_spa) &&
		    NSEC2MSEC(scan_time_ns) < mintime) {
			delay(hz);
			scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		}
		return;
	}

	int i = 0;
	zap_attribute_t *za;
	zbookmark_phys_t *zb;
	boolean_t limit_exceeded = B_FALSE;

	za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
	zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
		    zap_cursor_advance(&scn->errorscrub_cursor)) {
			name_to_bookmark(za->za_name, zb);

			scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
			    NULL, ZIO_FLAG_CANFAIL);
			dsl_pool_config_enter(dp, FTAG);
			read_by_block_level(scn, *zb);
			dsl_pool_config_exit(dp, FTAG);

			(void) zio_wait(scn->scn_zio_root);
			scn->scn_zio_root = NULL;

			scn->errorscrub_phys.dep_examined += 1;
			scn->errorscrub_phys.dep_to_examine -= 1;
			i++;
			if (i == zfs_scrub_error_blocks_per_txg ||
			    dsl_error_scrub_check_suspend(scn, zb)) {
				limit_exceeded = B_TRUE;
				break;
			}
		}

		if (!limit_exceeded)
			dsl_errorscrub_done(scn, B_TRUE, tx);

		dsl_errorscrub_sync_state(scn, tx);
		kmem_free(za, sizeof (*za));
		kmem_free(zb, sizeof (*zb));
		return;
	}

	int error = 0;
	for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
	    zap_cursor_advance(&scn->errorscrub_cursor)) {

		zap_cursor_t *head_ds_cursor;
		zap_attribute_t *head_ds_attr;
		zbookmark_err_phys_t head_ds_block;

		head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
		head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

		uint64_t head_ds_err_obj = za->za_first_integer;
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		boolean_t config_held = B_FALSE;
		uint64_t top_affected_fs;

		for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
		    head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {

			name_to_errphys(head_ds_attr->za_name, &head_ds_block);

			/*
			 * In case we are called from spa_sync the pool
			 * config is already held.
			 */
			if (!dsl_pool_config_held(dp)) {
				dsl_pool_config_enter(dp, FTAG);
				config_held = B_TRUE;
			}

			error = find_top_affected_fs(spa,
			    head_ds, &head_ds_block, &top_affected_fs);
			if (error)
				break;

			error = scrub_filesystem(spa, top_affected_fs,
			    &head_ds_block, &i);

			if (error == SET_ERROR(EFAULT)) {
				limit_exceeded = B_TRUE;
				break;
			}
		}

		zap_cursor_fini(head_ds_cursor);
		kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
		kmem_free(head_ds_attr, sizeof (*head_ds_attr));

		if (config_held)
			dsl_pool_config_exit(dp, FTAG);
	}

	kmem_free(za, sizeof (*za));
	kmem_free(zb, sizeof (*zb));
	if (!limit_exceeded)
		dsl_errorscrub_done(scn, B_TRUE, tx);

	dsl_errorscrub_sync_state(scn, tx);
}
/*
 * This is the primary entry point for scans that is called from syncing
 * context. Scans must happen entirely during syncing context so that we
 * can guarantee that blocks we are currently scanning will not change out
 * from under us. While a scan is active, this function controls how quickly
 * transaction groups proceed, instead of the normal handling provided by
 * txg_sync_thread().
 */
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	int err = 0;
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	state_sync_type_t sync_type = SYNC_OPTIONAL;

	if (spa->spa_resilver_deferred &&
	    !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
		spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init). We also restart scans if there
	 * is a deferred resilver and the user has manually disabled
	 * deferred resilvers via the tunable.
	 */
	if (dsl_scan_restarting(scn, tx) ||
	    (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
		    func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	/* reset scan statistics */
	scn->scn_visited_this_txg = 0;
	scn->scn_dedup_frees_this_txg = 0;
	scn->scn_holes_this_txg = 0;
	scn->scn_lt_min_this_txg = 0;
	scn->scn_gt_max_this_txg = 0;
	scn->scn_ddt_contained_this_txg = 0;
	scn->scn_objsets_visited_this_txg = 0;
	scn->scn_avg_seg_size_this_txg = 0;
	scn->scn_segs_this_txg = 0;
	scn->scn_avg_zio_size_this_txg = 0;
	scn->scn_zios_this_txg = 0;
	scn->scn_suspending = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys.  If we suspend, don't do
	 * any scrubbing or resilvering.  This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it.  It is also faster to free the
	 * blocks than to scrub them.
	 */
	err = dsl_process_async_destroys(dp, tx);
	if (err != 0)
		return;

	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
		return;

	/*
	 * Wait a few txgs after importing to begin scanning so that
	 * we can get the pool imported quickly.
	 */
	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
		return;

	/*
	 * zfs_scan_suspend_progress can be set to disable scan progress.
	 * We don't want to spin the txg_sync thread, so we add a delay
	 * here to simulate the time spent doing a scan. This is mostly
	 * useful for testing and debugging.
	 */
	if (zfs_scan_suspend_progress) {
		uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		uint_t mintime = (scn->scn_phys.scn_func ==
		    POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
		    zfs_scrub_min_time_ms;

		while (zfs_scan_suspend_progress &&
		    !txg_sync_waiting(scn->scn_dp) &&
		    !spa_shutting_down(scn->scn_dp->dp_spa) &&
		    NSEC2MSEC(scan_time_ns) < mintime) {
			delay(hz);
			scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		}
		return;
	}

	/*
	 * Disabled by default, set zfs_scan_report_txgs to report
	 * average performance over the last zfs_scan_report_txgs TXGs.
	 */
	if (!dsl_scan_is_paused_scrub(scn) && zfs_scan_report_txgs != 0 &&
	    tx->tx_txg % zfs_scan_report_txgs == 0) {
		scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
		spa_scan_stat_init(spa);
	}

	/*
	 * It is possible to switch from unsorted to sorted at any time,
	 * but afterwards the scan will remain sorted unless reloaded from
	 * a checkpoint after a reboot.
	 */
	if (!zfs_scan_legacy) {
		scn->scn_is_sorted = B_TRUE;
		if (scn->scn_last_checkpoint == 0)
			scn->scn_last_checkpoint = ddi_get_lbolt();
	}

	/*
	 * For sorted scans, determine what kind of work we will be doing
	 * this txg based on our memory limitations and whether or not we
	 * need to perform a checkpoint.
	 */
	if (scn->scn_is_sorted) {
		/*
		 * If we are over our checkpoint interval, set scn_clearing
		 * so that we can begin checkpointing immediately. The
		 * checkpoint allows us to save a consistent bookmark
		 * representing how much data we have scrubbed so far.
		 * Otherwise, use the memory limit to determine if we should
		 * scan for metadata or start issue scrub IOs. We accumulate
		 * metadata until we hit our hard memory limit at which point
		 * we issue scrub IOs until we are at our soft memory limit.
		 */
		if (scn->scn_checkpointing ||
		    ddi_get_lbolt() - scn->scn_last_checkpoint >
		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
			if (!scn->scn_checkpointing)
				zfs_dbgmsg("begin scan checkpoint for %s",
				    spa->spa_name);

			scn->scn_checkpointing = B_TRUE;
			scn->scn_clearing = B_TRUE;
		} else {
			boolean_t should_clear = dsl_scan_should_clear(scn);
			if (should_clear && !scn->scn_clearing) {
				zfs_dbgmsg("begin scan clearing for %s",
				    spa->spa_name);
				scn->scn_clearing = B_TRUE;
			} else if (!should_clear && scn->scn_clearing) {
				zfs_dbgmsg("finish scan clearing for %s",
				    spa->spa_name);
				scn->scn_clearing = B_FALSE;
			}
		}
	} else {
		ASSERT0(scn->scn_checkpointing);
		ASSERT0(scn->scn_clearing);
	}

	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
		/* Need to scan metadata for more blocks to scrub */
		dsl_scan_phys_t *scnp = &scn->scn_phys;
		taskqid_t prefetch_tqid;

		/*
		 * Calculate the max number of in-flight bytes for pool-wide
		 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
		 * Limits for the issuing phase are done per top-level vdev and
		 * are handled separately.
		 */
		scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
		    zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));

		if (scnp->scn_ddt_bookmark.ddb_class <=
		    scnp->scn_ddt_class_max) {
			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
			zfs_dbgmsg("doing scan sync for %s txg %llu; "
			    "ddt bm=%llu/%llu/%llu/%llx",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		} else {
			zfs_dbgmsg("doing scan sync for %s txg %llu; "
			    "bm=%llu/%llu/%llu/%llu",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_bookmark.zb_objset,
			    (longlong_t)scnp->scn_bookmark.zb_object,
			    (longlong_t)scnp->scn_bookmark.zb_level,
			    (longlong_t)scnp->scn_bookmark.zb_blkid);
		}

		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);

		scn->scn_prefetch_stop = B_FALSE;
		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
		ASSERT(prefetch_tqid != TASKQID_INVALID);

		dsl_pool_config_enter(dp, FTAG);
		dsl_scan_visit(scn, tx);
		dsl_pool_config_exit(dp, FTAG);

		mutex_enter(&dp->dp_spa->spa_scrub_lock);
		scn->scn_prefetch_stop = B_TRUE;
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&dp->dp_spa->spa_scrub_lock);

		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
		    "(%llu os's, %llu holes, %llu < mintxg, "
		    "%llu in ddt, %llu > maxtxg)",
		    (longlong_t)scn->scn_visited_this_txg,
		    spa->spa_name,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_objsets_visited_this_txg,
		    (longlong_t)scn->scn_holes_this_txg,
		    (longlong_t)scn->scn_lt_min_this_txg,
		    (longlong_t)scn->scn_ddt_contained_this_txg,
		    (longlong_t)scn->scn_gt_max_this_txg);

		if (!scn->scn_suspending) {
			ASSERT0(avl_numnodes(&scn->scn_queue));
			scn->scn_done_txg = tx->tx_txg + 1;
			if (scn->scn_is_sorted) {
				scn->scn_checkpointing = B_TRUE;
				scn->scn_clearing = B_TRUE;
				scn->scn_issued_before_pass +=
				    spa->spa_scan_pass_issued;
				spa_scan_stat_init(spa);
			}
			zfs_dbgmsg("scan complete for %s txg %llu",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg);
		}
	} else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
		ASSERT(scn->scn_clearing);

		/* need to issue scrubbing IOs from per-vdev queues */
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);
		scan_io_queues_run(scn);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		/* calculate and dprintf the current memory usage */
		(void) dsl_scan_should_clear(scn);
		dsl_scan_update_stats(scn);

		zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
		    "in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
		    (longlong_t)scn->scn_zios_this_txg,
		    spa->spa_name,
		    (longlong_t)scn->scn_segs_this_txg,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_avg_zio_size_this_txg,
		    (longlong_t)scn->scn_avg_seg_size_this_txg);
	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
		/* Finished with everything. Mark the scrub as complete */
		zfs_dbgmsg("scan issuing complete txg %llu for %s",
		    (longlong_t)tx->tx_txg,
		    spa->spa_name);
		ASSERT3U(scn->scn_done_txg, !=, 0);
		ASSERT0(spa->spa_scrub_inflight);
		ASSERT0(scn->scn_queues_pending);
		dsl_scan_done(scn, B_TRUE, tx);
		sync_type = SYNC_MANDATORY;
	}

	dsl_scan_sync_state(scn, tx, sync_type);
}
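/*
 * Illustrative sketch (excluded from the build): the sorted-scan phase
 * decision made in dsl_scan_sync() reduces to a small pure function.
 * The names below are hypothetical; the sketch only mirrors the
 * checkpoint-interval and memory-limit tests described in the comments
 * above, not the full state handling.
 */
#if 0
#include <stdbool.h>

enum example_phase { EXAMPLE_SCAN_METADATA, EXAMPLE_ISSUE_QUEUED_IOS };

static enum example_phase
example_scan_phase(bool checkpointing, bool over_checkpoint_interval,
    bool over_memory_limit)
{
	/* A due checkpoint or a full memory budget forces queue clearing. */
	if (checkpointing || over_checkpoint_interval || over_memory_limit)
		return (EXAMPLE_ISSUE_QUEUED_IOS);
	/* Otherwise keep gathering metadata into the sorting queues. */
	return (EXAMPLE_SCAN_METADATA);
}
#endif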
static void
count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
	/*
	 * Don't count embedded bp's, since we already did the work of
	 * scanning these when we scanned the containing block.
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Update the spa's stats on how many bytes we have issued.
	 * Sequential scrubs create a zio for each DVA of the bp. Each
	 * of these will include all DVAs for repair purposes, but the
	 * zio code will only try the first one unless there is an issue.
	 * Therefore, we should only count the first DVA for these IOs.
	 */
	atomic_add_64(&spa->spa_scan_pass_issued,
	    all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (int i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;

		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
	avl_index_t idx;
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
		atomic_add_64(&scn->scn_queues_pending, 1);
	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
		/* block is already scheduled for reading */
		sio_free(sio);
		return;
	}
	avl_insert(&queue->q_sios_by_addr, sio, idx);
	queue->q_sio_memused += SIO_GET_MUSED(sio);
	range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
	    SIO_GET_ASIZE(sio));
}
/*
 * Given all the info we got from our metadata scanning process, we
 * construct a scan_io_t and insert it into the scan sorting queue. The
 * I/O must already be suitable for us to process. This is controlled
 * by dsl_scan_enqueue().
 */
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
    int zio_flags, const zbookmark_phys_t *zb)
{
	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));

	ASSERT0(BP_IS_GANG(bp));
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	bp2sio(bp, sio, dva_i);
	sio->sio_flags = zio_flags;
	sio->sio_zb = *zb;

	queue->q_last_ext_addr = -1;
	scan_io_queue_insert_impl(queue, sio);
}
/*
 * Given a set of I/O parameters as discovered by the metadata traversal
 * process, attempts to place the I/O into the sorted queues (if allowed),
 * or immediately executes the I/O.
 */
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb)
{
	spa_t *spa = dp->dp_spa;

	ASSERT(!BP_IS_EMBEDDED(bp));

	/*
	 * Gang blocks are hard to issue sequentially, so we just issue them
	 * here immediately instead of queuing them.
	 */
	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
		scan_exec_io(dp, bp, zio_flags, zb, NULL);
		return;
	}

	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		dva_t dva;
		vdev_t *vdev;

		dva = bp->blk_dva[i];
		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
		ASSERT(vdev != NULL);

		mutex_enter(&vdev->vdev_scan_io_queue_lock);
		if (vdev->vdev_scan_io_queue == NULL)
			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
		ASSERT(dp->dp_scan != NULL);
		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
		    i, zio_flags, zb);
		mutex_exit(&vdev->vdev_scan_io_queue_lock);
	}
}
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	size_t psize = BP_GET_PSIZE(bp);
	boolean_t needs_io = B_FALSE;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;

	count_block(dp->dp_blkstats, bp);
	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg) {
		count_block_issued(spa, bp, B_TRUE);
		return (0);
	}

	/* Embedded BP's have phys_birth==0, so we reject them above. */
	ASSERT(!BP_IS_EMBEDDED(bp));

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(8) status can make useful progress reports.
		 */
		uint64_t asize = DVA_GET_ASIZE(dva);
		scn->scn_phys.scn_examined += asize;
		spa->spa_scan_pass_exam += asize;

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io)
			needs_io = dsl_scan_need_resilver(spa, dva, psize,
			    phys_birth);
	}

	if (needs_io && !zfs_no_scrub_io) {
		dsl_scan_enqueue(dp, bp, zio_flags, zb);
	} else {
		count_block_issued(spa, bp, B_TRUE);
	}

	/* do not relocate this block */
	return (0);
}
static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	dsl_scan_io_queue_t *queue = zio->io_private;

	abd_free(zio->io_abd);

	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
		cv_broadcast(&queue->q_zio_cv);
		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
	}

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
		    !dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
			atomic_inc_64(&spa->spa_dsl_pool->dp_scan
			    ->errorscrub_phys.dep_errors);
		} else {
			atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
			    .scn_errors);
		}
	}
}
/*
 * Given a scanning zio's information, executes the zio. The zio need
 * not necessarily be sortable; this function simply executes the zio,
 * no matter what it is. The optional queue argument allows the caller
 * to specify that they want per top level vdev IO rate limiting
 * instead of the legacy global limiting.
 */
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	abd_t *data = abd_alloc_for_io(size, B_FALSE);
	zio_t *pio;

	if (queue == NULL) {
		ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
		mutex_exit(&spa->spa_scrub_lock);
		pio = scn->scn_zio_root;
	} else {
		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;

		ASSERT3U(queue->q_maxinflight_bytes, >, 0);
		mutex_enter(q_lock);
		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
			cv_wait(&queue->q_zio_cv, q_lock);
		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
		mutex_exit(q_lock);
		pio = queue->q_zio;
	}

	ASSERT(pio != NULL);
	count_block_issued(spa, bp, queue == NULL);
	zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
	    queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
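/*
 * Illustrative sketch (excluded from the build): the in-flight byte
 * accounting above is a classic counting throttle. A minimal userland
 * analogue with POSIX threads, using hypothetical names, would look like
 * this; initialize the mutex and condvar with PTHREAD_MUTEX_INITIALIZER
 * and PTHREAD_COND_INITIALIZER before use.
 */
#if 0
#include <pthread.h>
#include <stdint.h>

struct example_throttle {
	pthread_mutex_t et_lock;
	pthread_cond_t et_cv;
	uint64_t et_inflight;
	uint64_t et_max;
};

static void
example_throttle_enter(struct example_throttle *t, uint64_t bytes)
{
	pthread_mutex_lock(&t->et_lock);
	while (t->et_inflight >= t->et_max)	/* block while over the limit */
		pthread_cond_wait(&t->et_cv, &t->et_lock);
	t->et_inflight += bytes;
	pthread_mutex_unlock(&t->et_lock);
}

static void
example_throttle_exit(struct example_throttle *t, uint64_t bytes)
{
	pthread_mutex_lock(&t->et_lock);
	t->et_inflight -= bytes;
	pthread_cond_broadcast(&t->et_cv);	/* wake throttled issuers */
	pthread_mutex_unlock(&t->et_lock);
}
#endif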
/*
 * This is the primary extent sorting algorithm. We balance two parameters:
 * 1) how many bytes of I/O are in an extent
 * 2) how well the extent is filled with I/O (as a fraction of its total size)
 * Since we allow extents to have gaps between their constituent I/Os, it's
 * possible to have a fairly large extent that contains the same amount of
 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
 * The algorithm sorts based on a score calculated from the extent's size,
 * the relative fill volume (in %) and a "fill weight" parameter that controls
 * the split between whether we prefer larger extents or more well populated
 * extents:
 *
 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
 *
 * Example:
 * 1) assume extsz = 64 MiB
 * 2) assume fill = 32 MiB (extent is half full)
 * 3) assume fill_weight = 3
 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
 *    SCORE = 32M + (50 * 3 * 32M) / 100
 *    SCORE = 32M + (4800M / 100)
 *    SCORE = 32M + 48M
 *         ^     ^
 *         |     +--- final total relative fill-based score
 *         +--------- final total fill-based score
 *    SCORE = 80M
 *
 * As can be seen, at fill_weight = 3 the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
 *
 * Since we do not care if one extent is only a few percent better than
 * another, we compress the score into 6 bits via binary logarithm (AKA
 * highbit64()) and put it into the high bits of the offset, which are
 * otherwise unused due to ashift. This reduces q_exts_by_size B-tree elements
 * to only 64 bits, so they can be compared with a single operation. It also
 * makes scrubs more sequential and reduces the chance that a minor extent
 * change moves it within the B-tree.
 */
static int
ext_size_compare(const void *x, const void *y)
{
	const uint64_t *a = x, *b = y;

	return (TREE_CMP(*a, *b));
}
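/*
 * Illustrative sketch (excluded from the build): the SCORE formula above,
 * using the shift-by-7 optimization from ext_size_value() in place of the
 * multiply/divide by 100. For extsz = 64 MiB, fill = 32 MiB and
 * fill_weight = 3 it returns 32M + (64 * 3 * 32M) / 128 = 80M, matching
 * the worked example exactly (the shift computes in 128ths rather than
 * percent, so other inputs can differ slightly from the /100 arithmetic).
 */
#if 0
#include <stdint.h>

static uint64_t
example_ext_score(uint64_t extsz, uint64_t fill, uint64_t fill_weight)
{
	/* fill-based score plus relative-fill-based score, in 128ths */
	return (fill + ((((fill << 7) / extsz) * fill_weight * fill) >> 7));
}
#endif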
static void
ext_size_create(range_tree_t *rt, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;

	zfs_btree_create(size_tree, ext_size_compare, sizeof (uint64_t));
}

static void
ext_size_destroy(range_tree_t *rt, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;
	ASSERT0(zfs_btree_numnodes(size_tree));

	zfs_btree_destroy(size_tree);
}

static uint64_t
ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
{
	(void) rt;
	uint64_t size = rsg->rs_end - rsg->rs_start;
	uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
	    fill_weight * rsg->rs_fill) >> 7);
	ASSERT3U(rt->rt_shift, >=, 8);
	return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
}
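/*
 * Illustrative sketch (excluded from the build): ext_size_value() packs
 * the whole sort key into one uint64_t. A higher score yields a smaller
 * 6-bit (64 - highbit64(score)) field in the top bits, so an ascending
 * B-tree walk visits higher-scoring extents first, while the extent start
 * offset in the low bits breaks ties in LBA order; a single integer
 * comparison (see ext_size_compare()) therefore sorts on both at once.
 * example_highbit64() is a stand-in for the kernel's highbit64().
 */
#if 0
#include <stdint.h>

static int
example_highbit64(uint64_t v)
{
	/* 1-based index of the most significant set bit; 0 for v == 0 */
	return (v == 0 ? 0 : 64 - __builtin_clzll(v));
}

static uint64_t
example_pack_key(uint64_t score, uint64_t start)
{
	/* assumes 'start' stays below bit 56; ashift guarantees slack */
	return (((uint64_t)(64 - example_highbit64(score)) << 56) | start);
}
#endif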
static void
ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	zfs_btree_t *size_tree = arg;
	ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
	uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
	zfs_btree_add(size_tree, &v);
}

static void
ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	zfs_btree_t *size_tree = arg;
	ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
	uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
	zfs_btree_remove(size_tree, &v);
}

static void
ext_size_vacate(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	ext_size_create(rt, arg);
}

static const range_tree_ops_t ext_size_ops = {
	.rtop_create = ext_size_create,
	.rtop_destroy = ext_size_destroy,
	.rtop_add = ext_size_add,
	.rtop_remove = ext_size_remove,
	.rtop_vacate = ext_size_vacate
};
/*
 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
 * based on LBA-order (from lowest to highest).
 */
static int
sio_addr_compare(const void *x, const void *y)
{
	const scan_io_t *a = x, *b = y;

	return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);

	q->q_scn = scn;
	q->q_vd = vd;
	q->q_sio_memused = 0;
	q->q_last_ext_addr = -1;
	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
	q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
	    &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
	avl_create(&q->q_sios_by_addr, sio_addr_compare,
	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));

	return (q);
}
/*
 * Destroys a scan queue and all segments and scan_io_t's contained in it.
 * No further execution of I/O occurs, anything pending in the queue is
 * simply freed without being executed.
 */
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	if (!avl_is_empty(&queue->q_sios_by_addr))
		atomic_add_64(&scn->scn_queues_pending, -1);
	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
	    NULL) {
		ASSERT(range_tree_contains(queue->q_exts_by_addr,
		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
		queue->q_sio_memused -= SIO_GET_MUSED(sio);
		sio_free(sio);
	}

	ASSERT0(queue->q_sio_memused);
	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
	range_tree_destroy(queue->q_exts_by_addr);
	avl_destroy(&queue->q_sios_by_addr);
	cv_destroy(&queue->q_zio_cv);

	kmem_free(queue, sizeof (*queue));
}
/*
 * Properly transfers a dsl_scan_queue_t from `svd' to `tvd'. This is
 * called on behalf of vdev_top_transfer when creating or destroying
 * a mirror vdev due to zpool attach/detach.
 */
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
	mutex_enter(&svd->vdev_scan_io_queue_lock);
	mutex_enter(&tvd->vdev_scan_io_queue_lock);

	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
	svd->vdev_scan_io_queue = NULL;
	if (tvd->vdev_scan_io_queue != NULL)
		tvd->vdev_scan_io_queue->q_vd = tvd;

	mutex_exit(&tvd->vdev_scan_io_queue_lock);
	mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		if (tvd->vdev_scan_io_queue != NULL)
			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
		tvd->vdev_scan_io_queue = NULL;
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;
	vdev_t *vdev;
	kmutex_t *q_lock;
	dsl_scan_io_queue_t *queue;
	scan_io_t *srch_sio, *sio;
	avl_index_t idx;
	uint64_t start, size;

	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
	ASSERT(vdev != NULL);
	q_lock = &vdev->vdev_scan_io_queue_lock;
	queue = vdev->vdev_scan_io_queue;

	mutex_enter(q_lock);
	if (queue == NULL) {
		mutex_exit(q_lock);
		return;
	}

	srch_sio = sio_alloc(BP_GET_NDVAS(bp));
	bp2sio(bp, srch_sio, dva_i);
	start = SIO_GET_OFFSET(srch_sio);
	size = SIO_GET_ASIZE(srch_sio);

	/*
	 * We can find the zio in two states:
	 * 1) Cold, just sitting in the queue of zio's to be issued at
	 *	some point in the future. In this case, all we do is
	 *	remove the zio from the q_sios_by_addr tree, decrement
	 *	its data volume from the containing range_seg_t and
	 *	resort the q_exts_by_size tree to reflect that the
	 *	range_seg_t has lost some of its 'fill'. We don't shorten
	 *	the range_seg_t - this is usually rare enough not to be
	 *	worth the extra hassle of trying to keep track of precise
	 *	extent boundaries.
	 * 2) Hot, where the zio is currently in-flight in
	 *	dsl_scan_issue_ios. In this case, we can't simply
	 *	reach in and stop the in-flight zio's, so we instead
	 *	block the caller. Eventually, dsl_scan_issue_ios will
	 *	be done with issuing the zio's it gathered and will
	 *	signal us.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio != NULL) {
		blkptr_t tmpbp;

		/* Got it while it was cold in the queue */
		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
		ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
		avl_remove(&queue->q_sios_by_addr, sio);
		if (avl_is_empty(&queue->q_sios_by_addr))
			atomic_add_64(&scn->scn_queues_pending, -1);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		ASSERT(range_tree_contains(queue->q_exts_by_addr, start,
		    size));
		range_tree_remove_fill(queue->q_exts_by_addr, start, size);

		/* count the block as though we issued it */
		sio2bp(sio, &tmpbp);
		count_block_issued(spa, &tmpbp, B_FALSE);

		sio_free(sio);
	}
	mutex_exit(q_lock);
}
/*
 * Callback invoked when a zio_free() zio is executing. This needs to be
 * intercepted to prevent the zio from deallocating a particular portion
 * of disk space and it then getting reallocated and written to, while we
 * still have it queued up for processing.
 */
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(scn != NULL);
	if (!dsl_scan_is_running(scn))
		return;

	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
		dsl_scan_freed_dva(spa, bp, i);
}
/*
 * Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has
 * not started, start it. Otherwise, only restart if max txg in DTL range is
 * greater than the max txg in the current scan. If the DTL max is less than
 * the scan max, then the vdev has not missed any new data since the resilver
 * started, so a restart is not needed.
 */
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
	uint64_t min, max;

	if (!vdev_resilver_needed(vd, &min, &max))
		return;

	if (!dsl_scan_resilvering(dp)) {
		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
		return;
	}

	if (max <= dp->dp_scan->scn_phys.scn_max_txg)
		return;

	/* restart is needed, check if it can be deferred */
	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
		vdev_defer_resilver(vd);
	else
		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
	"Max bytes in flight per leaf vdev for scrubs and resilvers");

ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to scrub per txg");

ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to obsolete per txg");

ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to free per txg");

ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to resilver per txg");

ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
	"Set to prevent scans from progressing");

ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
	"Set to disable scrub I/O");

ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
	"Set to disable scrub prefetching");

ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
	"Max number of blocks freed in one txg");

ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
	"Max number of dedup blocks freed in one txg");

ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
	"Enable processing of the free_bpobj");

ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
	"Enable block statistics calculation during scrub");

ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
	"Fraction of RAM for scan hard limit");

ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
	"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");

ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
	"Scrub using legacy non-sequential method");

ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
	"Scan progress on-disk checkpointing interval");

ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
	"Max gap in bytes between sequential scrub / resilver I/Os");

ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
	"Fraction of hard limit used as soft limit");

ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
	"Tunable to attempt to reduce lock contention");

ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
	"Tunable to adjust bias towards more filled segments during scans");

ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
	"Tunable to report resilver performance over the last N txgs");

ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
	"Process all resilvers immediately");

ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, U64, ZMOD_RW,
	"Error blocks to be scrubbed in one txg");