 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/range_tree.h>
#include <sys/zfs_vfsops.h>

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/O's issued since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */
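/*
 * In rough pseudocode, each spa_sync() pass therefore amounts to the
 * following (a simplified sketch, not the exact control flow of
 * dsl_scan_sync()):
 *
 *	if (all metadata scanned || dsl_scan_should_clear(scn))
 *		scan_io_queues_run(scn);     scan: drain sorted queues, LBA order
 *	else
 *		dsl_scan_visit(scn, tx);     scan: keep traversing metadata
 *
 * so a given txg either scans metadata or issues queued I/O, never both.
 */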
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(vdev_t *vd);

extern int zfs_vdev_async_write_active_min_dirty_percent;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
static int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of concurrently issued bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues to cause long latency,
 * which would cause long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
static unsigned long zfs_scan_vdev_limit = 4 << 20;

static int zfs_scan_issue_strategy = 0;
static int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
static unsigned long zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
static int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
static int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
static int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */

static int zfs_scrub_min_time_ms = 1000; /* min millis to scrub per txg */
static int zfs_obsolete_min_time_ms = 500; /* min millis to obsolete per txg */
static int zfs_free_min_time_ms = 1000; /* min millis to free per txg */
static int zfs_resilver_min_time_ms = 3000; /* min millis to resilver per txg */
static int zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static unsigned long zfs_async_block_max_blocks = ULONG_MAX;
/* max number of dedup blocks to free in a single TXG */
static unsigned long zfs_max_async_dedup_frees = 100000;

/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

/*
 * Enable/disable the processing of the free_bpobj object.
 */
static int zfs_free_bpobj_enabled = 1;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct scan_ds {
	avl_node_t	sds_node;
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_nr_dvas;

	/* fields from zio_t */
	uint32_t		sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node; /* link into issuing queue */
		list_node_t	sio_list_node; /* link for issuing to disk */
	} sio_nodes;

	/*
	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
	 * depending on how many were in the original bp. Only the
	 * first DVA is really used for sorting and issuing purposes.
	 * The other DVAs (if provided) simply exist so that the zio
	 * layer can find additional copies to repair from in the
	 * event of an error. This array must go at the end of the
	 * struct to allow this for the variable number of elements.
	 */
	dva_t			sio_dva[0];
} scan_io_t;

#define	SIO_SET_OFFSET(sio, x)		DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define	SIO_SET_ASIZE(sio, x)		DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define	SIO_GET_OFFSET(sio)		DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define	SIO_GET_ASIZE(sio)		DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define	SIO_GET_END_OFFSET(sio)		\
	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define	SIO_GET_MUSED(sio)		\
	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
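
/*
 * For example, a queued block that carries three DVAs is accounted by
 * SIO_GET_MUSED() as sizeof (scan_io_t) + 3 * sizeof (dva_t) bytes, while a
 * single-copy block costs sizeof (scan_io_t) + sizeof (dva_t). These figures
 * are what the per-queue sio memory counter (q_sio_memused) accumulates and
 * what dsl_scan_should_clear() compares against the memory limits.
 */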
struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
	vdev_t		*q_vd; /* top-level vdev that this queue represents */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	zfs_btree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;
	uint64_t	q_sio_memused;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};

/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_idatablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];

/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}

/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
	ASSERT3U(nr_dvas, >, 0);
	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);

	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time
	 */
	fill_weight = zfs_scan_fill_weight;

	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		char name[36];

		(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
		sio_cache[i] = kmem_cache_create(name,
		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
		    0, NULL, NULL, NULL, NULL, NULL, 0);
	}
}

void
scan_fini(void)
{
	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		kmem_cache_destroy(sio_cache[i]);
	}
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
	memset(bp, 0, sizeof (*bp));
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;

	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
	sio->sio_nr_dvas = BP_GET_NDVAS(bp);

	/*
	 * Copy the DVAs to the sio. We need all copies of the block so
	 * that the self healing code can use the alternate copies if the
	 * first is corrupted. We want the DVA at index dva_i to be first
	 * in the sio since this is the primary one that we want to issue.
	 */
	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
	}
}
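
/*
 * Example: for a block with three DVAs and dva_i == 1, the loop above copies
 * the DVAs into the sio in the order [1, 2, 0], so the DVA being scanned
 * lands in sio_dva[0] (the slot used for sorting and issuing) while the
 * remaining copies stay available for self healing.
 */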
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB). Limits for the issuing
	 * phase are done per top-level vdev and are handled separately.
	 */
	scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
	    dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);

	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress for %s; "
		    "restarting new-style scrub in txg %llu",
		    spa->spa_name,
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);

		/*
		 * Detect if the pool contains the signature of #2094.  If it
		 * does properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				memcpy(&scn->scn_phys, zaptmp,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub for %s was modified "
			    "by old software; restarting in txg %llu",
			    spa->spa_name,
			    (longlong_t)scn->scn_restart_txg);
		} else if (dsl_scan_resilvering(dp)) {
			/*
			 * If a resilver is in progress and there are already
			 * errors, restart it instead of finishing this scan and
			 * then restarting it. If there haven't been any errors
			 * then remember that the incore DTL is valid.
			 */
			if (scn->scn_phys.scn_errors > 0) {
				scn->scn_restart_txg = txg;
				zfs_dbgmsg("resilver can't excise DTL_MISSING "
				    "when finished; restarting on %s in txg "
				    "%llu",
				    spa->spa_name,
				    (u_longlong_t)scn->scn_restart_txg);
			} else {
				/* it's safe to excise DTL when finished */
				spa->spa_scrub_started = B_TRUE;
			}
		}
	}

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);

		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		scan_ds_prefetch_queue_clear(scn);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
	return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
	    (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
	if (scn->scn_bytes_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
			    NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		memcpy(&scn->scn_phys_cached, &scn->scn_phys,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint for %s",
			    spa->spa_name);

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}
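
/*
 * Illustrative callers of the three sync types in this file:
 * dsl_scan_setup_sync() and dsl_scan_cancel_sync() use SYNC_MANDATORY, since
 * the sorting queues must already be empty at those points, while
 * dsl_scrub_pause_resume_sync() uses SYNC_CACHED so that pause/resume flag
 * updates can be persisted even while sorted I/O is still outstanding.
 */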
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd))
		return (SET_ERROR(EBUSY));

	return (0);
}

void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			nvlist_t *aux = fnvlist_alloc();
			fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
			    "healing");
			spa_event_notify(spa, NULL, aux,
			    ESC_ZFS_RESILVER_START);
			nvlist_free(aux);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

		/*
		 * When starting a resilver clear any existing rebuild state.
		 * This is required to prevent stale rebuild status from
		 * being reported when a rebuild is run, then a resilver and
		 * finally a scrub. In which case only the scrub status
		 * should be reported by 'zpool status'.
		 */
		if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
			vdev_t *rvd = spa->spa_root_vdev;
			for (uint64_t i = 0; i < rvd->vdev_children; i++) {
				vdev_t *vd = rvd->vdev_child[i];
				vdev_rebuild_clear_sync(
				    (void *)(uintptr_t)vd->vdev_id, tx);
			}
		}
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	memset(&dp->dp_blkstats->zab_type, 0,
	    sizeof (dp->dp_blkstats->zab_type));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
	    (u_longlong_t)scn->scn_phys.scn_max_txg);
}

/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_RESILVER) {
		dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
		return (0);
	}

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (SET_ERROR(ECANCELED));
		}
		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);
	scan_ds_prefetch_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	spa_notify_waiters(spa);

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);

			if (scn->scn_phys.scn_min_txg) {
				nvlist_t *aux = fnvlist_alloc();
				fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
				    "healing");
				spa_event_notify(spa, NULL, aux,
				    ESC_ZFS_RESILVER_FINISH);
				nvlist_free(aux);
			} else {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_SCRUB_FINISH);
			}
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE, B_FALSE);
		}
		spa_errlog_rotate(spa);

		/*
		 * Don't clear flag until after vdev_dtl_reassess to ensure that
		 * DTL_MISSING will get updated when possible.
		 */
		spa->spa_scrub_started = B_FALSE;

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

		/*
		 * Clear any resilver_deferred flags in the config.
		 * If there are drives that need resilvering, kick
		 * off an asynchronous request to start resilver.
		 * vdev_clear_resilver_deferred() may update the config
		 * before the resilver can restart. In the event of
		 * a crash during this period, the spa loading code
		 * will find the drives that need to be resilvered
		 * and start the resilver then.
		 */
		if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
		    vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
			spa_history_log_internal(spa,
			    "starting deferred resilver", tx, "errors=%llu",
			    (u_longlong_t)spa_get_errlog_size(spa));
			spa_async_request(spa, SPA_ASYNC_RESILVER);
		}

		/* Clear recent error events (i.e. duplicate events tracking) */
		if (complete)
			zfs_ereport_clear(spa, NULL);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_scan_is_running(scn));
}

static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
		spa_notify_waiters(spa);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}

/* start a new scan, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver for %s at txg=%llu",
	    dp->dp_spa->spa_name, (longlong_t)txg);
}

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}

static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;

	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}

static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}

static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}

static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}

static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_bytes_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}

/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
 */
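/*
 * Worked example (illustrative numbers only): on a system with 16 GiB of
 * physmem, the hard limit is physmem / zfs_scan_mem_lim_fact =
 * 16 GiB / 20 ~= 819 MiB (already above the 16 MiB floor, and further capped
 * at 5% of the pool's allocated space). The soft limit is then
 * hard - MIN(hard / zfs_scan_mem_lim_soft_fact, zfs_scan_mem_lim_soft_max)
 * = 819 MiB - MIN(~41 MiB, 128 MiB) ~= 778 MiB, matching the computation in
 * dsl_scan_should_clear() below.
 */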
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t alloc, mlim_hard, mlim_soft, mused;

	alloc = metaslab_class_get_alloc(spa_normal_class(spa));
	alloc += metaslab_class_get_alloc(spa_special_class(spa));
	alloc += metaslab_class_get_alloc(spa_dedup_class(spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/* # extents in exts_by_size = # in exts_by_addr */
			mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
			    sizeof (range_seg_gap_t) + queue->q_sio_memused;
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_bytes_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}

static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 and objset blocks. */
	if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
			dprintf("suspending at first available bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    zb->zb_objset, 0, 0, 0);
		} else if (zb != NULL) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
			dsl_scan_phys_t *scnp = &scn->scn_phys;
			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		}

		scn->scn_suspending = B_TRUE;

		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	ASSERT(!BP_IS_REDACTED(bp));
	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		const lr_write_t *lr = (const lr_write_t *)lrc;
		const blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		ASSERT(!BP_IS_REDACTED(bp));
		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	ASSERT(spa_writeable(dp->dp_spa));

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0)
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg, B_FALSE);

	zil_free(zilog);
}

/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;

	return (zbookmark_compare(spc_a->spc_datablkszsec,
	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}

static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
		zfs_refcount_destroy(&spc->spc_refcnt);
		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
	}
}

static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
{
	scan_prefetch_ctx_t *spc;

	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
	zfs_refcount_create(&spc->spc_refcnt);
	zfs_refcount_add(&spc->spc_refcnt, tag);
	spc->spc_scn = scn;
	if (dnp != NULL) {
		spc->spc_datablkszsec = dnp->dn_datablkszsec;
		spc->spc_indblkshift = dnp->dn_indblkshift;
		spc->spc_root = B_FALSE;
	} else {
		spc->spc_datablkszsec = 0;
		spc->spc_indblkshift = 0;
		spc->spc_root = B_TRUE;
	}

	return (spc);
}

static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
{
	zfs_refcount_add(&spc->spc_refcnt, tag);
}

static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	void *cookie = NULL;
	scan_prefetch_issue_ctx_t *spic = NULL;

	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
	    &cookie)) != NULL) {
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
    const zbookmark_phys_t *zb)
{
	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
	dnode_phys_t tmp_dnp;
	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;

	if (zb->zb_objset != last_zb->zb_objset)
		return (B_TRUE);
	if ((int64_t)zb->zb_object < 0)
		return (B_FALSE);

	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;

	if (zbookmark_subtree_completed(dnp, zb, last_zb))
		return (B_TRUE);

	return (B_FALSE);
}

static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
	avl_index_t idx;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
		return;

	if (dsl_scan_check_prefetch_resume(spc, zb))
		return;

	scan_prefetch_ctx_add_ref(spc, scn);
	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
	spic->spic_spc = spc;
	spic->spic_bp = *bp;
	spic->spic_zb = *zb;

	/*
	 * Add the IO to the queue of blocks to prefetch. This allows us to
	 * prioritize blocks that we will need first for the main traversal
	 * thread.
	 */
	mutex_enter(&spa->spa_scrub_lock);
	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
		/* this block is already queued for prefetch */
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
		scan_prefetch_ctx_rele(spc, scn);
		mutex_exit(&spa->spa_scrub_lock);
		return;
	}

	avl_insert(&scn->scn_prefetch_queue, spic, idx);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int i;
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
		return;

	SET_BOOKMARK(&zb, objset, object, 0, 0);

	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);

	for (i = 0; i < dnp->dn_nblkptr; i++) {
		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
		zb.zb_blkid = i;
		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zb.zb_level = 0;
		zb.zb_blkid = DMU_SPILL_BLKID;
		dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
	}

	scan_prefetch_ctx_rele(spc, FTAG);
}

static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *private)
{
	scan_prefetch_ctx_t *spc = private;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;

	/* broadcast that the IO has completed for rate limiting purposes */
	mutex_enter(&spa->spa_scrub_lock);
	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);

	/* if there was an error or we are done prefetching, just cleanup */
	if (buf == NULL || scn->scn_prefetch_stop)
		goto out;

	if (BP_GET_LEVEL(bp) > 0) {
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t czb;

		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1, zb->zb_blkid * epb + i);
			dsl_scan_prefetch(spc, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_prefetch_dnode(scn, cdnp,
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		objset_phys_t *osp = buf->b_data;

		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
		    zb->zb_objset, DMU_META_DNODE_OBJECT);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_groupused_dnode, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_userused_dnode, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

out:
	if (buf != NULL)
		arc_buf_destroy(buf, private);
	scan_prefetch_ctx_rele(spc, scn);
}

static void
dsl_scan_prefetch_thread(void *arg)
{
	dsl_scan_t *scn = arg;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	/* loop until we are told to stop */
	while (!scn->scn_prefetch_stop) {
		arc_flags_t flags = ARC_FLAG_NOWAIT |
		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;

		mutex_enter(&spa->spa_scrub_lock);

		/*
		 * Wait until we have an IO to issue and are not above our
		 * maximum in flight limit.
		 */
		while (!scn->scn_prefetch_stop &&
		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		}

		/* recheck if we should stop since we waited for the cv */
		if (scn->scn_prefetch_stop) {
			mutex_exit(&spa->spa_scrub_lock);
			break;
		}

		/* remove the prefetch IO from the tree */
		spic = avl_first(&scn->scn_prefetch_queue);
		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
		avl_remove(&scn->scn_prefetch_queue, spic);

		mutex_exit(&spa->spa_scrub_lock);

		if (BP_IS_PROTECTED(&spic->spic_bp)) {
			ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
			    BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
			ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
			zio_flags |= ZIO_FLAG_RAW;
		}

		/* issue the prefetch asynchronously */
		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);

		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}

	ASSERT(scn->scn_prefetch_stop);

	/* free any prefetches we didn't get to complete */
	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
		avl_remove(&scn->scn_prefetch_queue, spic);
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
	mutex_exit(&spa->spa_scrub_lock);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for suspending
		 * again.
		 */
		if (memcmp(zb, &scn->scn_phys.scn_bookmark,
		    sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
		}
	}
	return (B_FALSE);
}


static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

/*
 * Return nonzero on i/o error.
 * Return new buf to write out in *bufp.
 */
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	ASSERT(!BP_IS_REDACTED(bp));

	/*
	 * There is an unlikely case of encountering dnodes with contradicting
	 * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag before in files created
	 * or modified before commit 4254acb was merged. As it is not possible
	 * to know which of the two is correct, report an error.
	 */
	if (dnp != NULL &&
	    dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
		scn->scn_phys.scn_errors++;
		spa_log_error(dp->dp_spa, zb);
		return (SET_ERROR(EINVAL));
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		if (BP_IS_PROTECTED(bp)) {
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			zio_flags |= ZIO_FLAG_RAW;
		}

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group/project accounting
			 * objects, and never skip them, even if we are
			 * suspending. This is necessary so that the
			 * space deltas from this txg get integrated.
			 */
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				dsl_scan_visitdnode(scn, ds, osp->os_type,
				    &osp->os_projectused_dnode,
				    DMU_PROJECTUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	}

	return (0);
}
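
/*
 * Visits every block pointer in a dnode, including the spill block
 * pointer if one is present.
 */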
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	blkptr_t *bp_toread = NULL;

	if (dsl_scan_check_suspend(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	scn->scn_visited_this_txg++;

	/*
	 * This debugging is commented out to conserve stack space. This
	 * function is called recursively and the debugging adds several
	 * bytes to the stack for each call. It can be commented back in
	 * if required to debug an issue in dsl_scan_visitbp().
	 *
	 * dprintf_bp(bp,
	 *     "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	 *     ds, ds ? ds->ds_object : 0,
	 *     zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	 *     bp);
	 */

	if (BP_IS_HOLE(bp)) {
		scn->scn_holes_this_txg++;
		return;
	}

	if (BP_IS_REDACTED(bp)) {
		ASSERT(dsl_dataset_feature_is_active(ds,
		    SPA_FEATURE_REDACTED_DATASETS));
		return;
	}

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
		scn->scn_lt_min_this_txg++;
		return;
	}

	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	*bp_toread = *bp;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
		goto out;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		scn->scn_ddt_contained_this_txg++;
		goto out;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
		scn->scn_gt_max_this_txg++;
		goto out;
	}

	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);

out:
	kmem_free(bp_toread, sizeof (blkptr_t));
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
		    zb.zb_objset, 0, 0, 0);
	} else {
		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
	}

	scn->scn_objsets_visited_this_txg++;

	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
	dsl_scan_prefetch(spc, bp, &zb);
	scan_prefetch_ctx_rele(spc, FTAG);

	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}
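
/*
 * Adjusts a dsl_scan_phys_t when the dataset it currently points at is
 * destroyed: a snapshot is replaced by its next snapshot (and flagged for
 * another visit), any other dataset resets the bookmark to
 * ZB_DESTROYED_OBJSET.
 */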
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about. However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn_phys->scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu on %s; currently "
			    "traversing; reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    ds->ds_dir->dd_pool->dp_spa->spa_name,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn_phys->scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu on %s; currently "
			    "traversing; reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object,
			    ds->ds_dir->dd_pool->dp_spa->spa_name);
		}
	}
}

/*
 * Invoked when a dataset is destroyed. We need to make sure that:
 *
 * 1) If it is the dataset that was currently being scanned, we write
 *	a new dsl_scan_phys_t and marking the objset reference in it
 *	as destroyed.
 * 2) Remove it from the work queue, if it was present.
 *
 * If the dataset was actually a snapshot, instead of marking the dataset
 * as destroyed, we instead substitute the next snapshot in line.
 */
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_destroyed_scn_phys(ds, &scn->scn_phys);
	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		if (ds->ds_is_snapshot)
			scan_ds_queue_insert(scn,
			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu on %s; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    dp->dp_spa->spa_name,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu on %s; in queue; "
			    "removing",
			    (u_longlong_t)ds->ds_object,
			    dp->dp_spa->spa_name);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds->ds_object) {
		scn_bookmark->zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    ds->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
}

/*
 * Called when a dataset is snapshotted. If we were currently traversing
 * this snapshot, we reset our bookmark to point at the newly created
 * snapshot. We also modify our work queue to remove the old snapshot and
 * replace with the new one.
 */
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
    zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds1->ds_object) {
		scn_bookmark->zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    ds1->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
		scn_bookmark->zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    ds2->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}
}

/*
 * Called when an origin dataset and its clone are swapped. If we were
 * currently traversing the dataset, we need to switch to traversing the
 * newly promoted clone.
 */
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg1, mintxg2;
	boolean_t ds1_queued, ds2_queued;

	if (!dsl_scan_is_running(scn))
		return;

	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);

	/*
	 * Handle the in-memory scan queue.
	 */
	ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
	ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * The swapping code below would not handle this case correctly,
		 * since we can't insert ds2 if it is already there. That's
		 * because scan_ds_queue_insert() prohibits a duplicate insert
		 * and panics.
		 */
	} else if (ds1_queued) {
		scan_ds_queue_remove(scn, ds1->ds_object);
		scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
	} else if (ds2_queued) {
		scan_ds_queue_remove(scn, ds2->ds_object);
		scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
	}

	/*
	 * Handle the on-disk scan queue.
	 * The on-disk state is an out-of-date version of the in-memory state,
	 * so the in-memory and on-disk values for ds1_queued and ds2_queued may
	 * be different. Therefore we need to apply the swap logic to the
	 * on-disk state independently of the in-memory state.
	 */
	ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
	ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * Alternatively, we could check for EEXIST from
		 * zap_add_int_key() and back out to the original state, but
		 * that would be more work than checking for this case upfront.
		 */
	} else if (ds1_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (ds2_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	uint64_t originobj = *(uint64_t *)arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
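
/*
 * Scans a single dataset: traverses its ZIL (for heads only) and its root
 * block pointer, then queues the next snapshot and any clones so the scan
 * continues in creation-txg order.
 */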
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *	cur_min_txg is no less than ds_creation_txg.
		 *	We have already visited these blocks.
		 *	or
		 *	birth > scn_max_txg
		 *	The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, MAXNAMELEN);
		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have a objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (!dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass on %s; visiting again",
		    dp->dp_spa->spa_name);
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    (void) zap_cursor_advance(&zc)) {
				scan_ds_queue_insert(scn,
				    zfs_strtonum(za.za_name, NULL),
				    dsl_dataset_phys(ds)->ds_creation_txg);
			}
			zap_cursor_fini(&zc);
		} else {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &ds->ds_object,
			    DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}
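
/*
 * dmu_objset_find_dp() callback that walks each head dataset back to its
 * oldest non-clone snapshot and adds that dataset to the in-memory scan
 * queue.
 */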
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (!dsl_scan_is_running(scn))
		return;

	/*
	 * This function is special because it is the only thing
	 * that can add scan_io_t's to the vdev scan queues from
	 * outside dsl_scan_sync(). For the most part this is ok
	 * as long as it is called from within syncing context.
	 * However, dsl_scan_sync() expects that no new sio's will
	 * be added between when all the work for a scan is done
	 * and the next txg when the scan is actually marked as
	 * completed. This check ensures we do not issue new sio's
	 * during this period.
	 */
	if (scn->scn_done_txg != 0)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = {{{{0}}}};
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
	    "suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
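
/*
 * Returns the highest txg this dataset needs to be scanned to; for
 * snapshots that is capped at the snapshot's creation txg.
 */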
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scan_ds_t *sds;
	dsl_pool_t *dp = scn->scn_dp;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_suspending)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_suspending)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, NULL, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_suspending);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
		/*
		 * If we were suspended, continue from here. Note if the
		 * ds we were suspended on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/*
	 * In case we suspended right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));

	/*
	 * Keep pulling things out of the dataset avl queue. Updates to the
	 * persistent zap-object-as-queue happen only at checkpoints.
	 */
	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
		dsl_dataset_t *ds;
		uint64_t dsobj = sds->sds_dsobj;
		uint64_t txg = sds->sds_txg;

		/* dequeue and free the ds from the queue */
		scan_ds_queue_remove(scn, dsobj);
		sds = NULL;

		/* set up min / max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (txg != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg, txg);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/* No more objsets to fetch, we're done */
	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
	ASSERT0(scn->scn_suspending);
}
static uint64_t
dsl_scan_count_data_disks(vdev_t *rvd)
{
	uint64_t i, leaves = 0;

	for (i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];
		if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
			continue;
		leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
	}
	return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
	int i;
	uint64_t cur_size = 0;

	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
	}

	q->q_total_zio_size_this_txg += cur_size;
	q->q_zios_this_txg++;
}

static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
    uint64_t end)
{
	q->q_total_seg_size_this_txg += end - start;
	q->q_segs_this_txg++;
}
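
/*
 * Determines whether the queue-issuing phase should yield back to the
 * txg sync, mirroring the checks in dsl_scan_check_suspend().
 */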
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
	/* See comment in dsl_scan_check_suspend() */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	return ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

/*
 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
 * disk. This consumes the io_list and frees the scan_io_t's. This is
 * called when emptying queues, either when we're up against the memory
 * limit or when we have finished scanning. Returns B_TRUE if we stopped
 * processing the list before we finished. Any sios that were not issued
 * will remain in the io_list.
 */
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	int64_t bytes_issued = 0;
	boolean_t suspended = B_FALSE;

	while ((sio = list_head(io_list)) != NULL) {
		blkptr_t bp;

		if (scan_io_queue_check_suspend(scn)) {
			suspended = B_TRUE;
			break;
		}

		sio2bp(sio, &bp);
		bytes_issued += SIO_GET_ASIZE(sio);
		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
		    &sio->sio_zb, queue);
		(void) list_remove_head(io_list);
		scan_io_queues_update_zio_stats(queue, &bp);
	}

	atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);

	return (suspended);
}

/*
 * This function removes sios from an IO queue which reside within a given
 * range_seg_t and inserts them (in offset order) into a list. Note that
 * we only ever return a maximum of 32 sios at once. If there are more sios
 * to process within this segment that did not make it onto the list we
 * return B_TRUE and otherwise B_FALSE.
 */
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
	scan_io_t *srch_sio, *sio, *next_sio;
	avl_index_t idx;
	uint_t num_sios = 0;
	int64_t bytes_issued = 0;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	srch_sio = sio_alloc(1);
	srch_sio->sio_nr_dvas = 1;
	SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));

	/*
	 * The exact start of the extent might not contain any matching zios,
	 * so if that's the case, examine the next one in the tree.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio == NULL)
		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);

	while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr) && num_sios <= 32) {
		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
		    queue->q_exts_by_addr));
		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
		    queue->q_exts_by_addr));

		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
		avl_remove(&queue->q_sios_by_addr, sio);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		bytes_issued += SIO_GET_ASIZE(sio);
		num_sios++;
		list_insert_tail(list, sio);
		sio = next_sio;
	}

	/*
	 * We limit the number of sios we process at once to 32 to avoid
	 * biting off more than we can chew. If we didn't take everything
	 * in the segment we update it to reflect the work we were able to
	 * complete. Otherwise, we remove it from the range tree entirely.
	 */
	if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr)) {
		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
		    -bytes_issued);
		range_tree_resize_segment(queue->q_exts_by_addr, rs,
		    SIO_GET_OFFSET(sio), rs_get_end(rs,
		    queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));

		return (B_TRUE);
	} else {
		uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
		uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
		range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
		return (B_FALSE);
	}
}

/*
 * This is called from the queue emptying thread and selects the next
 * extent from which we are to issue I/Os. The behavior of this function
 * depends on the state of the scan, the current memory consumption and
 * whether or not we are performing a scan shutdown.
 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
 *	needs to perform a checkpoint
 * 2) We select the largest available extent if we are up against the
 *	memory limit.
 * 3) Otherwise we don't select any extents.
 */
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	range_tree_t *rt = queue->q_exts_by_addr;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
	ASSERT(scn->scn_is_sorted);

	/* handle tunable overrides */
	if (scn->scn_checkpointing || scn->scn_clearing) {
		if (zfs_scan_issue_strategy == 1) {
			return (range_tree_first(rt));
		} else if (zfs_scan_issue_strategy == 2) {
			/*
			 * We need to get the original entry in the by_addr
			 * tree so we can modify it.
			 */
			range_seg_t *size_rs =
			    zfs_btree_first(&queue->q_exts_by_size, NULL);
			if (size_rs == NULL)
				return (NULL);
			uint64_t start = rs_get_start(size_rs, rt);
			uint64_t size = rs_get_end(size_rs, rt) - start;
			range_seg_t *addr_rs = range_tree_find(rt, start,
			    size);
			ASSERT3P(addr_rs, !=, NULL);
			ASSERT3U(rs_get_start(size_rs, rt), ==,
			    rs_get_start(addr_rs, rt));
			ASSERT3U(rs_get_end(size_rs, rt), ==,
			    rs_get_end(addr_rs, rt));
			return (addr_rs);
		}
	}

	/*
	 * During normal clearing, we want to issue our largest segments
	 * first, keeping IO as sequential as possible, and leaving the
	 * smaller extents for later with the hope that they might eventually
	 * grow to larger sequential segments. However, when the scan is
	 * checkpointing, no new extents will be added to the sorting queue,
	 * so the way we are sorted now is as good as it will ever get.
	 * In this case, we instead switch to issuing extents in LBA order.
	 */
	if (scn->scn_checkpointing) {
		return (range_tree_first(rt));
	} else if (scn->scn_clearing) {
		/*
		 * We need to get the original entry in the by_addr
		 * tree so we can modify it.
		 */
		range_seg_t *size_rs = zfs_btree_first(&queue->q_exts_by_size,
		    NULL);
		if (size_rs == NULL)
			return (NULL);
		uint64_t start = rs_get_start(size_rs, rt);
		uint64_t size = rs_get_end(size_rs, rt) - start;
		range_seg_t *addr_rs = range_tree_find(rt, start, size);
		ASSERT3P(addr_rs, !=, NULL);
		ASSERT3U(rs_get_start(size_rs, rt), ==, rs_get_start(addr_rs,
		    rt));
		ASSERT3U(rs_get_end(size_rs, rt), ==, rs_get_end(addr_rs, rt));
		return (addr_rs);
	} else {
		return (NULL);
	}
}
static void
scan_io_queues_run_one(void *arg)
{
	dsl_scan_io_queue_t *queue = arg;
	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
	boolean_t suspended = B_FALSE;
	range_seg_t *rs = NULL;
	scan_io_t *sio = NULL;
	list_t sio_list;

	ASSERT(queue->q_scn->scn_is_sorted);

	list_create(&sio_list, sizeof (scan_io_t),
	    offsetof(scan_io_t, sio_nodes.sio_list_node));
	mutex_enter(q_lock);

	/* Calculate maximum in-flight bytes for this vdev. */
	queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
	    (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));

	/* reset per-queue scan statistics for this txg */
	queue->q_total_seg_size_this_txg = 0;
	queue->q_segs_this_txg = 0;
	queue->q_total_zio_size_this_txg = 0;
	queue->q_zios_this_txg = 0;

	/* loop until we run out of time or sios */
	while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
		uint64_t seg_start = 0, seg_end = 0;
		boolean_t more_left = B_TRUE;

		ASSERT(list_is_empty(&sio_list));

		/* loop while we still have sios left to process in this rs */
		while (more_left) {
			scan_io_t *first_sio, *last_sio;

			/*
			 * We have selected which extent needs to be
			 * processed next. Gather up the corresponding sios.
			 */
			more_left = scan_io_queue_gather(queue, rs, &sio_list);
			ASSERT(!list_is_empty(&sio_list));
			first_sio = list_head(&sio_list);
			last_sio = list_tail(&sio_list);

			seg_end = SIO_GET_END_OFFSET(last_sio);
			if (seg_start == 0)
				seg_start = SIO_GET_OFFSET(first_sio);

			/*
			 * Issuing sios can take a long time so drop the
			 * queue lock. The sio queue won't be updated by
			 * other threads since we're in syncing context so
			 * we can be sure that our trees will remain exactly
			 * as we left them.
			 */
			mutex_exit(q_lock);
			suspended = scan_io_queue_issue(queue, &sio_list);
			mutex_enter(q_lock);

			if (suspended)
				break;
		}

		/* update statistics for debugging purposes */
		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);

		if (suspended)
			break;
	}

	/*
	 * If we were suspended in the middle of processing,
	 * requeue any unfinished sios and exit.
	 */
	while ((sio = list_head(&sio_list)) != NULL) {
		list_remove(&sio_list, sio);
		scan_io_queue_insert_impl(queue, sio);
	}

	mutex_exit(q_lock);
	list_destroy(&sio_list);
}

/*
 * Performs an emptying run on all scan queues in the pool. This just
 * punches out one thread per top-level vdev, each of which processes
 * only that vdev's scan queue. We can parallelize the I/O here because
 * we know that each queue's I/Os only affect its own top-level vdev.
 *
 * This function waits for the queue runs to complete, and must be
 * called from dsl_scan_sync (or in general, syncing context).
 */
static void
scan_io_queues_run(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(scn->scn_is_sorted);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (scn->scn_bytes_pending == 0)
		return;

	if (scn->scn_taskq == NULL) {
		int nthreads = spa->spa_root_vdev->vdev_children;

		/*
		 * We need to make this taskq *always* execute as many
		 * threads in parallel as we have top-level vdevs and no
		 * less, otherwise strange serialization of the calls to
		 * scan_io_queues_run_one can occur during spa_sync runs
		 * and that significantly impacts performance.
		 */
		scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
		    minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];

		mutex_enter(&vd->vdev_scan_io_queue_lock);
		if (vd->vdev_scan_io_queue != NULL) {
			VERIFY(taskq_dispatch(scn->scn_taskq,
			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
			    TQ_SLEEP) != TASKQID_INVALID);
		}
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * Wait for the queues to finish issuing their IOs for this run
	 * before we return. There may still be IOs in flight at this
	 * point.
	 */
	taskq_wait(scn->scn_taskq);
}
static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_async_block_max_blocks != 0 &&
	    scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
		return (B_TRUE);
	}

	if (zfs_max_async_dedup_frees != 0 &&
	    scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
		return (B_TRUE);
	}

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_async_block_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	if (BP_GET_DEDUP(bp))
		scn->scn_dedup_frees_this_txg++;
	return (0);
}
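
/*
 * Rolls the per-queue segment and zio statistics up into pool-wide
 * per-txg averages kept on the dsl_scan_t.
 */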
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	int i;
	uint64_t seg_size_total = 0, zio_size_total = 0;
	uint64_t seg_count_total = 0, zio_count_total = 0;

	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;

		if (queue == NULL)
			continue;

		seg_size_total += queue->q_total_seg_size_this_txg;
		zio_size_total += queue->q_total_zio_size_this_txg;
		seg_count_total += queue->q_segs_this_txg;
		zio_count_total += queue->q_zios_this_txg;
	}

	if (seg_count_total == 0 || zio_count_total == 0) {
		scn->scn_avg_seg_size_this_txg = 0;
		scn->scn_avg_zio_size_this_txg = 0;
		scn->scn_segs_this_txg = 0;
		scn->scn_zios_this_txg = 0;
		return;
	}

	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
	scn->scn_segs_this_txg = seg_count_total;
	scn->scn_zios_this_txg = zio_count_total;
}
static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	return (dsl_scan_free_block_cb(arg, bp, tx));
}

static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;
	const dva_t *dva = &bp->blk_dva[0];

	if (dsl_scan_async_block_should_pause(scn))
		return (SET_ERROR(ERESTART));

	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
	    DVA_GET_ASIZE(dva), tx);
	scn->scn_visited_this_txg++;
	return (0);
}
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;
	boolean_t clones_left;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	clones_left = spa_livelist_delete_check(spa);
	return ((used != 0) || (clones_left));
}
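
/*
 * Walks the vdev tree below vd and reports whether any leaf is
 * resilvering without having its resilver deferred.
 */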
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
	boolean_t need_resilver = B_FALSE;

	for (int c = 0; c < vd->vdev_children; c++) {
		need_resilver |=
		    dsl_scan_check_deferred(vd->vdev_child[c]);
	}

	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
	    !vd->vdev_ops->vdev_op_leaf)
		return (need_resilver);

	if (!vd->vdev_resilver_deferred)
		need_resilver = B_TRUE;

	return (need_resilver);
}
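
/*
 * Decides whether a resilver I/O must be issued for this DVA, based on
 * the top-level vdev's DTLs and the deferred-resilver state of its leaves.
 */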
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	vdev_t *vd;

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops == &vdev_indirect_ops) {
		/*
		 * The indirect vdev can point to multiple
		 * vdevs. For simplicity, always create
		 * the resilver zio_t. zio_vdev_io_start()
		 * will bypass the child resilver i/o's if
		 * they are on vdevs that don't have DTL's.
		 */
		return (B_TRUE);
	}

	if (DVA_GET_GANG(dva)) {
		/*
		 * Gang members may be spread across multiple
		 * vdevs, so the best estimate we have is the
		 * scrub range, which has already been checked.
		 * XXX -- it would be better to change our
		 * allocation policy to ensure that all
		 * gang members reside on the same vdev.
		 */
		return (B_TRUE);
	}

	/*
	 * Check if the top-level vdev must resilver this offset.
	 * When the offset does not intersect with a dirty leaf DTL
	 * then it may be possible to skip the resilver IO. The psize
	 * is provided instead of asize to simplify the check for RAIDZ.
	 */
	if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
		return (B_FALSE);

	/*
	 * Check that this top-level vdev has a device under it which
	 * is resilvering and is not deferred.
	 */
	if (!dsl_scan_check_deferred(vd))
		return (B_FALSE);

	return (B_TRUE);
}
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	if (spa_suspend_async_destroy(spa))
		return (0);

	if (zfs_free_bpobj_enabled &&
	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    bpobj_dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf. Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree on %s in txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    spa->spa_name, (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;
		scn->scn_dedup_frees_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed. This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return (err);
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    !spa_livelist_delete_check(spa)) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	spa_notify_waiters(spa);

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}
	return (0);
}
3520 * This is the primary entry point for scans that is called from syncing
3521 * context. Scans must happen entirely during syncing context so that we
3522 * can guarantee that blocks we are currently scanning will not change out
3523 * from under us. While a scan is active, this function controls how quickly
3524 * transaction groups proceed, instead of the normal handling provided by
3525 * txg_sync_thread().
3528 dsl_scan_sync(dsl_pool_t
*dp
, dmu_tx_t
*tx
)
3531 dsl_scan_t
*scn
= dp
->dp_scan
;
3532 spa_t
*spa
= dp
->dp_spa
;
3533 state_sync_type_t sync_type
= SYNC_OPTIONAL
;
3535 if (spa
->spa_resilver_deferred
&&
3536 !spa_feature_is_active(dp
->dp_spa
, SPA_FEATURE_RESILVER_DEFER
))
3537 spa_feature_incr(spa
, SPA_FEATURE_RESILVER_DEFER
, tx
);
3540 * Check for scn_restart_txg before checking spa_load_state, so
3541 * that we can restart an old-style scan while the pool is being
3542 * imported (see dsl_scan_init). We also restart scans if there
3543 * is a deferred resilver and the user has manually disabled
3544 * deferred resilvers via the tunable.
3546 if (dsl_scan_restarting(scn
, tx
) ||
3547 (spa
->spa_resilver_deferred
&& zfs_resilver_disable_defer
)) {
3548 pool_scan_func_t func
= POOL_SCAN_SCRUB
;
3549 dsl_scan_done(scn
, B_FALSE
, tx
);
3550 if (vdev_resilver_needed(spa
->spa_root_vdev
, NULL
, NULL
))
3551 func
= POOL_SCAN_RESILVER
;
3552 zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
3553 func
, dp
->dp_spa
->spa_name
, (longlong_t
)tx
->tx_txg
);
3554 dsl_scan_setup_sync(&func
, tx
);
3558 * Only process scans in sync pass 1.
3560 if (spa_sync_pass(spa
) > 1)
3564 * If the spa is shutting down, then stop scanning. This will
3565 * ensure that the scan does not dirty any new data during the
3568 if (spa_shutting_down(spa
))
3572 * If the scan is inactive due to a stalled async destroy, try again.
3574 if (!scn
->scn_async_stalled
&& !dsl_scan_active(scn
))
3577 /* reset scan statistics */
3578 scn
->scn_visited_this_txg
= 0;
3579 scn
->scn_dedup_frees_this_txg
= 0;
3580 scn
->scn_holes_this_txg
= 0;
3581 scn
->scn_lt_min_this_txg
= 0;
3582 scn
->scn_gt_max_this_txg
= 0;
3583 scn
->scn_ddt_contained_this_txg
= 0;
3584 scn
->scn_objsets_visited_this_txg
= 0;
3585 scn
->scn_avg_seg_size_this_txg
= 0;
3586 scn
->scn_segs_this_txg
= 0;
3587 scn
->scn_avg_zio_size_this_txg
= 0;
3588 scn
->scn_zios_this_txg
= 0;
3589 scn
->scn_suspending
= B_FALSE
;
3590 scn
->scn_sync_start_time
= gethrtime();
3591 spa
->spa_scrub_active
= B_TRUE
;
3594 * First process the async destroys. If we suspend, don't do
3595 * any scrubbing or resilvering. This ensures that there are no
3596 * async destroys while we are scanning, so the scan code doesn't
3597 * have to worry about traversing it. It is also faster to free the
3598 * blocks than to scrub them.
3600 err
= dsl_process_async_destroys(dp
, tx
);
3604 if (!dsl_scan_is_running(scn
) || dsl_scan_is_paused_scrub(scn
))
3608 * Wait a few txgs after importing to begin scanning so that
3609 * we can get the pool imported quickly.
3611 if (spa
->spa_syncing_txg
< spa
->spa_first_txg
+ SCAN_IMPORT_WAIT_TXGS
)
3615 * zfs_scan_suspend_progress can be set to disable scan progress.
3616 * We don't want to spin the txg_sync thread, so we add a delay
3617 * here to simulate the time spent doing a scan. This is mostly
3618 * useful for testing and debugging.
3620 if (zfs_scan_suspend_progress
) {
3621 uint64_t scan_time_ns
= gethrtime() - scn
->scn_sync_start_time
;
3622 int mintime
= (scn
->scn_phys
.scn_func
== POOL_SCAN_RESILVER
) ?
3623 zfs_resilver_min_time_ms
: zfs_scrub_min_time_ms
;
3625 while (zfs_scan_suspend_progress
&&
3626 !txg_sync_waiting(scn
->scn_dp
) &&
3627 !spa_shutting_down(scn
->scn_dp
->dp_spa
) &&
3628 NSEC2MSEC(scan_time_ns
) < mintime
) {
3630 scan_time_ns
= gethrtime() - scn
->scn_sync_start_time
;
3636 * It is possible to switch from unsorted to sorted at any time,
3637 * but afterwards the scan will remain sorted unless reloaded from
3638 * a checkpoint after a reboot.
3640 if (!zfs_scan_legacy
) {
3641 scn
->scn_is_sorted
= B_TRUE
;
3642 if (scn
->scn_last_checkpoint
== 0)
3643 scn
->scn_last_checkpoint
= ddi_get_lbolt();

	/*
	 * For sorted scans, determine what kind of work we will be doing
	 * this txg based on our memory limitations and whether or not we
	 * need to perform a checkpoint.
	 */
	if (scn->scn_is_sorted) {
		/*
		 * If we are over our checkpoint interval, set scn_clearing
		 * so that we can begin checkpointing immediately. The
		 * checkpoint allows us to save a consistent bookmark
		 * representing how much data we have scrubbed so far.
		 * Otherwise, use the memory limit to determine if we should
		 * scan for metadata or start issuing scrub IOs. We accumulate
		 * metadata until we hit our hard memory limit, at which point
		 * we issue scrub IOs until we are at our soft memory limit.
		 */
		if (scn->scn_checkpointing ||
		    ddi_get_lbolt() - scn->scn_last_checkpoint >
		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
			if (!scn->scn_checkpointing)
				zfs_dbgmsg("begin scan checkpoint for %s",
				    spa->spa_name);

			scn->scn_checkpointing = B_TRUE;
			scn->scn_clearing = B_TRUE;
		} else {
			boolean_t should_clear = dsl_scan_should_clear(scn);
			if (should_clear && !scn->scn_clearing) {
				zfs_dbgmsg("begin scan clearing for %s",
				    spa->spa_name);
				scn->scn_clearing = B_TRUE;
			} else if (!should_clear && scn->scn_clearing) {
				zfs_dbgmsg("finish scan clearing for %s",
				    spa->spa_name);
				scn->scn_clearing = B_FALSE;
			}
		}
	} else {
		ASSERT0(scn->scn_checkpointing);
		ASSERT0(scn->scn_clearing);
	}

	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
		/* Need to scan metadata for more blocks to scrub */
		dsl_scan_phys_t *scnp = &scn->scn_phys;
		taskqid_t prefetch_tqid;

		/*
		 * Recalculate the max number of in-flight bytes for pool-wide
		 * scanning operations (minimum 1MB). Limits for the issuing
		 * phase are done per top-level vdev and are handled separately.
		 */
		scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
		    dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);

		if (scnp->scn_ddt_bookmark.ddb_class <=
		    scnp->scn_ddt_class_max) {
			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
			zfs_dbgmsg("doing scan sync for %s txg %llu; "
			    "ddt bm=%llu/%llu/%llu/%llx",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		} else {
			zfs_dbgmsg("doing scan sync for %s txg %llu; "
			    "bm=%llu/%llu/%llu/%llu",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_bookmark.zb_objset,
			    (longlong_t)scnp->scn_bookmark.zb_object,
			    (longlong_t)scnp->scn_bookmark.zb_level,
			    (longlong_t)scnp->scn_bookmark.zb_blkid);
		}

		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);

		scn->scn_prefetch_stop = B_FALSE;
		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
		ASSERT(prefetch_tqid != TASKQID_INVALID);

		dsl_pool_config_enter(dp, FTAG);
		dsl_scan_visit(scn, tx);
		dsl_pool_config_exit(dp, FTAG);

		mutex_enter(&dp->dp_spa->spa_scrub_lock);
		scn->scn_prefetch_stop = B_TRUE;
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&dp->dp_spa->spa_scrub_lock);

		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;
3744 zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
3745 "(%llu os's, %llu holes, %llu < mintxg, "
3746 "%llu in ddt, %llu > maxtxg)",
3747 (longlong_t
)scn
->scn_visited_this_txg
,
3749 (longlong_t
)NSEC2MSEC(gethrtime() -
3750 scn
->scn_sync_start_time
),
3751 (longlong_t
)scn
->scn_objsets_visited_this_txg
,
3752 (longlong_t
)scn
->scn_holes_this_txg
,
3753 (longlong_t
)scn
->scn_lt_min_this_txg
,
3754 (longlong_t
)scn
->scn_ddt_contained_this_txg
,
3755 (longlong_t
)scn
->scn_gt_max_this_txg
);

		if (!scn->scn_suspending) {
			ASSERT0(avl_numnodes(&scn->scn_queue));
			scn->scn_done_txg = tx->tx_txg + 1;
			if (scn->scn_is_sorted) {
				scn->scn_checkpointing = B_TRUE;
				scn->scn_clearing = B_TRUE;
			}
			zfs_dbgmsg("scan complete for %s txg %llu",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg);
		}
	} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
		ASSERT(scn->scn_clearing);

		/* need to issue scrubbing IOs from per-vdev queues */
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);
		scan_io_queues_run(scn);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		/* calculate and dprintf the current memory usage */
		(void) dsl_scan_should_clear(scn);
		dsl_scan_update_stats(scn);

		zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
		    "in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
		    (longlong_t)scn->scn_zios_this_txg,
		    spa->spa_name,
		    (longlong_t)scn->scn_segs_this_txg,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_avg_zio_size_this_txg,
		    (longlong_t)scn->scn_avg_seg_size_this_txg);
	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
		/* Finished with everything. Mark the scrub as complete */
		zfs_dbgmsg("scan issuing complete txg %llu for %s",
		    (longlong_t)tx->tx_txg,
		    spa->spa_name);
		ASSERT3U(scn->scn_done_txg, !=, 0);
		ASSERT0(spa->spa_scrub_inflight);
		ASSERT0(scn->scn_bytes_pending);
		dsl_scan_done(scn, B_TRUE, tx);
		sync_type = SYNC_MANDATORY;
	}

	dsl_scan_sync_state(scn, tx, sync_type);
}

static void
count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * Don't count embedded bp's, since we already did the work of
	 * scanning these when we scanned the containing block.
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Update the spa's stats on how many bytes we have issued.
	 * Sequential scrubs create a zio for each DVA of the bp. Each
	 * of these will include all DVAs for repair purposes, but the
	 * zio code will only try the first one unless there is an issue.
	 * Therefore, we should only count the first DVA for these IOs.
	 */
	if (scn->scn_is_sorted) {
		atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
		    DVA_GET_ASIZE(&bp->blk_dva[0]));
	} else {
		spa_t *spa = scn->scn_dp->dp_spa;

		for (i = 0; i < BP_GET_NDVAS(bp); i++) {
			atomic_add_64(&spa->spa_scan_pass_issued,
			    DVA_GET_ASIZE(&bp->blk_dva[i]));
		}
	}

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	mutex_enter(&zab->zab_lock);

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;

		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}

	mutex_exit(&zab->zab_lock);
}

static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
	avl_index_t idx;
	int64_t asize = SIO_GET_ASIZE(sio);
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
		/* block is already scheduled for reading */
		atomic_add_64(&scn->scn_bytes_pending, -asize);
		sio_free(sio);
		return;
	}
	avl_insert(&queue->q_sios_by_addr, sio, idx);
	queue->q_sio_memused += SIO_GET_MUSED(sio);
	range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize);
}

/*
 * Given all the info we got from our metadata scanning process, we
 * construct a scan_io_t and insert it into the scan sorting queue. The
 * I/O must already be suitable for us to process. This is controlled
 * by dsl_scan_enqueue().
 */
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
    int zio_flags, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));

	ASSERT0(BP_IS_GANG(bp));
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	bp2sio(bp, sio, dva_i);
	sio->sio_flags = zio_flags;
	sio->sio_zb = *zb;

	/*
	 * Increment the bytes pending counter now so that we can't
	 * get an integer underflow in case the worker processes the
	 * zio before we get to incrementing this counter.
	 */
	atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio));

	scan_io_queue_insert_impl(queue, sio);
}

/*
 * Given a set of I/O parameters as discovered by the metadata traversal
 * process, attempts to place the I/O into the sorted queues (if allowed),
 * or immediately executes the I/O.
 */
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb)
{
	spa_t *spa = dp->dp_spa;

	ASSERT(!BP_IS_EMBEDDED(bp));

	/*
	 * Gang blocks are hard to issue sequentially, so we just issue them
	 * here immediately instead of queuing them.
	 */
	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
		scan_exec_io(dp, bp, zio_flags, zb, NULL);
		return;
	}

	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		dva_t dva;
		vdev_t *vdev;

		dva = bp->blk_dva[i];
		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
		ASSERT(vdev != NULL);

		mutex_enter(&vdev->vdev_scan_io_queue_lock);
		if (vdev->vdev_scan_io_queue == NULL)
			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
		ASSERT(dp->dp_scan != NULL);
		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
		    i, zio_flags, zb);
		mutex_exit(&vdev->vdev_scan_io_queue_lock);
	}
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	size_t psize = BP_GET_PSIZE(bp);
	boolean_t needs_io = B_FALSE;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg) {
		count_block(scn, dp->dp_blkstats, bp);
		return (0);
	}

	/* Embedded BP's have phys_birth==0, so we reject them above. */
	ASSERT(!BP_IS_EMBEDDED(bp));

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(8) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io)
			needs_io = dsl_scan_need_resilver(spa, dva, psize,
			    phys_birth);
	}

	if (needs_io && !zfs_no_scrub_io) {
		dsl_scan_enqueue(dp, bp, zio_flags, zb);
	} else {
		count_block(scn, dp->dp_blkstats, bp);
	}

	/* do not relocate this block */
	return (0);
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	dsl_scan_io_queue_t *queue = zio->io_private;

	abd_free(zio->io_abd);

	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
		cv_broadcast(&queue->q_zio_cv);
		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
	}

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
	}
}

/*
 * Given a scanning zio's information, executes the zio. The zio need
 * not necessarily be sortable; this function simply executes the zio,
 * no matter what it is. The optional queue argument allows the caller
 * to specify that they want per top level vdev IO rate limiting
 * instead of the legacy global limiting.
 */
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	abd_t *data = abd_alloc_for_io(size, B_FALSE);

	if (queue == NULL) {
		ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;

		ASSERT3U(queue->q_maxinflight_bytes, >, 0);
		mutex_enter(q_lock);
		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
			cv_wait(&queue->q_zio_cv, q_lock);
		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
		mutex_exit(q_lock);
	}

	count_block(scn, dp->dp_blkstats, bp);
	zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size,
	    dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
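
/*
 * Note on the throttling above: scan_exec_io() charges BP_GET_PSIZE(bp)
 * against either the global spa_scrub_inflight counter or the per-queue
 * q_inflight_bytes counter before issuing the read, and waits on the
 * matching condition variable while that counter is at its limit. The
 * corresponding credit and cv_broadcast() happen in dsl_scan_scrub_done()
 * when the zio completes, so together the two functions bound how much
 * scrub/resilver I/O can be outstanding at once.
 */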

/*
 * This is the primary extent sorting algorithm. We balance two parameters:
 * 1) how many bytes of I/O are in an extent
 * 2) how well the extent is filled with I/O (as a fraction of its total size)
 * Since we allow extents to have gaps between their constituent I/Os, it's
 * possible to have a fairly large extent that contains the same amount of
 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
 * The algorithm sorts based on a score calculated from the extent's size,
 * the relative fill volume (in %) and a "fill weight" parameter that controls
 * the split between whether we prefer larger extents or more well populated
 * extents:
 *
 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
 *
 * Example:
 * 1) assume extsz = 64 MiB
 * 2) assume fill = 32 MiB (extent is half full)
 * 3) assume fill_weight = 3
 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
 *    SCORE = 32M + (50 * 3 * 32M) / 100
 *    SCORE = 32M + (4800M / 100)
 *    SCORE = 32M + 48M
 *             ^     ^
 *             |     +--- final total relative fill-based score
 *             +--------- final total fill-based score
 *    SCORE = 80M
 *
 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
 */
static int
ext_size_compare(const void *x, const void *y)
{
	const range_seg_gap_t *rsa = x, *rsb = y;

	uint64_t sa = rsa->rs_end - rsa->rs_start;
	uint64_t sb = rsb->rs_end - rsb->rs_start;
	uint64_t score_a, score_b;

	score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
	    fill_weight * rsa->rs_fill) >> 7);
	score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
	    fill_weight * rsb->rs_fill) >> 7);

	if (score_a > score_b)
		return (-1);
	if (score_a == score_b) {
		if (rsa->rs_start < rsb->rs_start)
			return (-1);
		if (rsa->rs_start == rsb->rs_start)
			return (0);
		return (1);
	}
	return (1);
}
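
/*
 * For a concrete sense of the bias, compare two extents that both contain
 * 32 MiB of queued I/O with fill_weight = 3 (as in the example above): a
 * 64 MiB extent that is half full scores 32M + (50% * 3 * 32M) = 80M, while
 * a 32 MiB extent that is completely full scores 32M + (100% * 3 * 32M) =
 * 128M, so the denser extent is issued first. In the code the percentage is
 * approximated in 1/128ths: (rs_fill << 7) / extent_size evaluates to 64 for
 * the half-full extent and 128 for the full one, and the final >> 7 undoes
 * the scaling.
 */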

/*
 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
 * based on LBA-order (from lowest to highest).
 */
static int
sio_addr_compare(const void *x, const void *y)
{
	const scan_io_t *a = x, *b = y;

	return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}

/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);

	q->q_scn = scn;
	q->q_vd = vd;
	q->q_sio_memused = 0;
	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
	q->q_exts_by_addr = range_tree_create_impl(&rt_btree_ops, RANGE_SEG_GAP,
	    &q->q_exts_by_size, 0, 0, ext_size_compare, zfs_scan_max_ext_gap);
	avl_create(&q->q_sios_by_addr, sio_addr_compare,
	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));

	return (q);
}
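
/*
 * Each queue created above keeps two views of its pending work:
 * q_sios_by_addr is an AVL tree of individual scan_io_t's in LBA order
 * (sio_addr_compare), while q_exts_by_addr is a gap-tolerant range tree
 * whose auxiliary index, q_exts_by_size, keeps the merged extents ordered
 * by the fill-based score from ext_size_compare(). scan_io_queue_insert_impl()
 * updates both structures under the owning vdev's queue lock.
 */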

/*
 * Destroys a scan queue and all segments and scan_io_t's contained in it.
 * No further execution of I/O occurs; anything pending in the queue is
 * simply freed without being executed.
 */
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	void *cookie = NULL;
	int64_t bytes_dequeued = 0;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
	    NULL) {
		ASSERT(range_tree_contains(queue->q_exts_by_addr,
		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
		bytes_dequeued += SIO_GET_ASIZE(sio);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);
		sio_free(sio);
	}

	ASSERT0(queue->q_sio_memused);
	atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
	range_tree_destroy(queue->q_exts_by_addr);
	avl_destroy(&queue->q_sios_by_addr);
	cv_destroy(&queue->q_zio_cv);

	kmem_free(queue, sizeof (*queue));
}

/*
 * Properly transfers a dsl_scan_queue_t from `svd' to `tvd'. This is
 * called on behalf of vdev_top_transfer when creating or destroying
 * a mirror vdev due to zpool attach/detach.
 */
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
	mutex_enter(&svd->vdev_scan_io_queue_lock);
	mutex_enter(&tvd->vdev_scan_io_queue_lock);

	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
	svd->vdev_scan_io_queue = NULL;
	if (tvd->vdev_scan_io_queue != NULL)
		tvd->vdev_scan_io_queue->q_vd = tvd;

	mutex_exit(&tvd->vdev_scan_io_queue_lock);
	mutex_exit(&svd->vdev_scan_io_queue_lock);
}

static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		if (tvd->vdev_scan_io_queue != NULL)
			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
		tvd->vdev_scan_io_queue = NULL;
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}
}

static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;
	vdev_t *vdev;
	kmutex_t *q_lock;
	dsl_scan_io_queue_t *queue;
	scan_io_t *srch_sio, *sio;
	avl_index_t idx;
	uint64_t start, size;

	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
	ASSERT(vdev != NULL);
	q_lock = &vdev->vdev_scan_io_queue_lock;
	queue = vdev->vdev_scan_io_queue;

	mutex_enter(q_lock);
	if (queue == NULL) {
		mutex_exit(q_lock);
		return;
	}

	srch_sio = sio_alloc(BP_GET_NDVAS(bp));
	bp2sio(bp, srch_sio, dva_i);
	start = SIO_GET_OFFSET(srch_sio);
	size = SIO_GET_ASIZE(srch_sio);

	/*
	 * We can find the zio in two states:
	 * 1) Cold, just sitting in the queue of zio's to be issued at
	 *	some point in the future. In this case, all we do is
	 *	remove the zio from the q_sios_by_addr tree, decrement
	 *	its data volume from the containing range_seg_t and
	 *	resort the q_exts_by_size tree to reflect that the
	 *	range_seg_t has lost some of its 'fill'. We don't shorten
	 *	the range_seg_t - this is usually rare enough not to be
	 *	worth the extra hassle of trying to keep track of precise
	 *	extent boundaries.
	 * 2) Hot, where the zio is currently in-flight in
	 *	dsl_scan_issue_ios. In this case, we can't simply
	 *	reach in and stop the in-flight zio's, so we instead
	 *	block the caller. Eventually, dsl_scan_issue_ios will
	 *	be done with issuing the zio's it gathered and will
	 *	signal us.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio != NULL) {
		int64_t asize = SIO_GET_ASIZE(sio);
		blkptr_t tmpbp;

		/* Got it while it was cold in the queue */
		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
		ASSERT3U(size, ==, asize);
		avl_remove(&queue->q_sios_by_addr, sio);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
		range_tree_remove_fill(queue->q_exts_by_addr, start, size);

		/*
		 * We only update scn_bytes_pending in the cold path,
		 * otherwise it will already have been accounted for as
		 * part of the zio's execution.
		 */
		atomic_add_64(&scn->scn_bytes_pending, -asize);

		/* count the block as though we issued it */
		sio2bp(sio, &tmpbp);
		count_block(scn, dp->dp_blkstats, &tmpbp);

		sio_free(sio);
	}
	mutex_exit(q_lock);
}

/*
 * Callback invoked when a zio_free() zio is executing. This needs to be
 * intercepted to prevent a portion of disk space from being deallocated
 * and then reallocated and written to while we still have it queued up
 * for processing.
 */
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(scn != NULL);
	if (!dsl_scan_is_running(scn))
		return;

	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
		dsl_scan_freed_dva(spa, bp, i);
}

/*
 * Check if a vdev needs resilvering (non-empty DTL); if so and a resilver
 * has not already started, start it. Otherwise, only restart if the max txg
 * in the DTL range is greater than the max txg in the current scan. If the
 * DTL max is less than the scan max, then the vdev has not missed any new
 * data since the resilver started, so a restart is not needed.
 */
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
	uint64_t min, max;

	if (!vdev_resilver_needed(vd, &min, &max))
		return;

	if (!dsl_scan_resilvering(dp)) {
		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
		return;
	}

	if (max <= dp->dp_scan->scn_phys.scn_max_txg)
		return;

	/* restart is needed, check if it can be deferred */
	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
		vdev_defer_resilver(vd);
	else
		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}

ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
	"Max bytes in flight per leaf vdev for scrubs and resilvers");

ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
	"Min millisecs to scrub per txg");

ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
	"Min millisecs to obsolete per txg");

ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
	"Min millisecs to free per txg");

ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
	"Min millisecs to resilver per txg");

ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
	"Set to prevent scans from progressing");

ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
	"Set to disable scrub I/O");

ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
	"Set to disable scrub prefetching");

ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW,
	"Max number of blocks freed in one txg");

ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
	"Max number of dedup blocks freed in one txg");

ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
	"Enable processing of the free_bpobj");

ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
	"Fraction of RAM for scan hard limit");

ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
	"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");

ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
	"Scrub using legacy non-sequential method");

ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
	"Scan progress on-disk checkpointing interval");

ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
	"Max gap in bytes between sequential scrub / resilver I/Os");

ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
	"Fraction of hard limit used as soft limit");

ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
	"Tunable to attempt to reduce lock contention");

ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
	"Tunable to adjust bias towards more filled segments during scans");

ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
	"Process all resilvers immediately");