/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/O's issued since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_leaves(vdev_t *vd);

extern int zfs_vdev_async_write_active_min_dirty_percent;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of in-flight bytes per leaf vdev. We attempt to strike
 * a balance here between keeping the vdev queues full of I/Os at all
 * times and not overflowing the queues, which causes long latency and
 * therefore long txg sync times. No matter what, we will not overload
 * the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
unsigned long zfs_scan_vdev_limit = 4 << 20;

int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
unsigned long zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */

int zfs_scrub_min_time_ms = 1000;	/* min millisecs to scrub per txg */
int zfs_obsolete_min_time_ms = 500;	/* min millisecs to obsolete per txg */
int zfs_free_min_time_ms = 1000;	/* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000;	/* min millisecs to resilver per txg */
int zfs_scan_checkpoint_intval = 7200;	/* in seconds */
int zfs_scan_suspend_progress = 0;	/* set to prevent scans from progressing */
int zfs_no_scrub_io = B_FALSE;		/* set to disable scrub i/o */
int zfs_no_scrub_prefetch = B_FALSE;	/* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
unsigned long zfs_async_block_max_blocks = 100000;

int zfs_resilver_disable_defer = 0;	/* set to disable resilver deferring */

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

/*
 * Enable/disable the processing of the free_bpobj object.
 */
int zfs_free_bpobj_enabled = 1;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_offset;
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_asize;

	/* fields from zio_t */
	int			sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node;	/* link into issuing queue */
		list_node_t	sio_list_node;	/* link for issuing to disk */
	} sio_nodes;
} scan_io_t;

struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn;	/* associated dsl_scan_t */
	vdev_t		*q_vd;	/* top-level vdev that this queue represents */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	avl_tree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv;	/* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};

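/*
 * Editorial note on the two extent trees above (not part of the original
 * source): q_exts_by_addr keeps extents in LBA order so I/O can be issued
 * sequentially, while q_exts_by_size ranks the same extents by size
 * (weighted for sparseness, see ext_size_compare() and fill_weight) so
 * that, under memory pressure, the largest and most sequential extents
 * can be issued first to shrink the queue, per the theory statement at
 * the top of this file.
 */
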
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache;

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	sio_cache = kmem_cache_create("sio_cache",
	    sizeof (scan_io_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
scan_fini(void)
{
	kmem_cache_destroy(sio_cache);
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp, uint64_t vdev_id)
{
	bzero(bp, sizeof (*bp));
	DVA_SET_ASIZE(&bp->blk_dva[0], sio->sio_asize);
	DVA_SET_VDEV(&bp->blk_dva[0], vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], sio->sio_offset);
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	/* we discard the vdev id, since we can deduce it from the queue */
	sio->sio_offset = DVA_GET_OFFSET(&bp->blk_dva[dva_i]);
	sio->sio_asize = DVA_GET_ASIZE(&bp->blk_dva[dva_i]);
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
}

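/*
 * Illustrative sketch (not part of the original source): the two helpers
 * above are intended to round-trip. For a single-DVA block pointer bp
 * whose DVA lives on vdev "vdev_id", something like the following would
 * be expected to rebuild an equivalent bp, with the vdev id supplied by
 * the owning queue rather than stored in the scan_io_t:
 *
 *	scan_io_t sio;
 *	blkptr_t reconstructed;
 *
 *	bp2sio(bp, &sio, 0);
 *	sio2bp(&sio, &reconstructed, vdev_id);
 *
 * "reconstructed" then carries bp's DVA[0], blk_prop, birth txgs and
 * checksum; blk_fill is forced to 1 since only data pointers are queued.
 */
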
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB). Limits for the issuing
	 * phase are done per top-level vdev and are handled separately.
	 */
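	/*
	 * For example (illustrative numbers only, not taken from the
	 * original source): with the default zfs_scan_vdev_limit of
	 * 4 MiB and a pool with 12 leaf vdevs, this allows up to 48 MiB
	 * of pool-wide in-flight scan I/O; the 1 MiB floor only matters
	 * if zfs_scan_vdev_limit is tuned well below its default.
	 */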
	scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
	    dsl_scan_count_leaves(spa->spa_root_vdev), 1ULL << 20);

	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		/*
		 * Detect if the pool contains the signature of #2094. If it
		 * does properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				bcopy(zaptmp, &scn->scn_phys,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    (longlong_t)scn->scn_restart_txg);
		}
	}

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);

		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		scan_ds_prefetch_queue_clear(scn);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
	if (scn->scn_bytes_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint");

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn))
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, NULL,
			    ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_RESILVER) {
		dsl_resilver_restart(spa->spa_dsl_pool, 0);
		return (0);
	}

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (ECANCELED);
		}

		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

/*
 * Sets the resilver defer flag to B_FALSE on all leaf devs under vd. Returns
 * B_TRUE if we have devices that need to be resilvered and are available to
 * accept resilver I/Os.
 */
static boolean_t
dsl_scan_clear_deferred(vdev_t *vd, dmu_tx_t *tx)
{
	boolean_t resilver_needed = B_FALSE;
	spa_t *spa = vd->vdev_spa;

	for (int c = 0; c < vd->vdev_children; c++) {
		resilver_needed |=
		    dsl_scan_clear_deferred(vd->vdev_child[c], tx);
	}

	if (vd == spa->spa_root_vdev &&
	    spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
		spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
		vdev_config_dirty(vd);
		spa->spa_resilver_deferred = B_FALSE;
		return (resilver_needed);
	}

	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
	    !vd->vdev_ops->vdev_op_leaf)
		return (resilver_needed);

	if (vd->vdev_resilver_deferred)
		vd->vdev_resilver_deferred = B_FALSE;

	return (!vdev_is_dead(vd) && !vd->vdev_offline &&
	    vdev_resilver_needed(vd, NULL, NULL));
}

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);
	scan_ds_prefetch_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE);

			spa_event_notify(spa, NULL, NULL,
			    scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

		/*
		 * Clear any deferred_resilver flags in the config.
		 * If there are drives that need resilvering, kick
		 * off an asynchronous request to start resilver.
		 * dsl_scan_clear_deferred() may update the config
		 * before the resilver can restart. In the event of
		 * a crash during this period, the spa loading code
		 * will find the drives that need to be resilvered
		 * when the machine reboots and start the resilver then.
		 */
		boolean_t resilver_needed =
		    dsl_scan_clear_deferred(spa->spa_root_vdev, tx);
		if (resilver_needed) {
			spa_history_log_internal(spa,
			    "starting deferred resilver", tx,
			    "errors=%llu", spa_get_errlog_size(spa));
			spa_async_request(spa, SPA_ASYNC_RESILVER);
		}
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_scan_is_running(scn));
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* note the time the scrub was paused for rate accounting */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}

/* start a new scan, or restart an existing one. */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
}

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}

static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;
	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}

static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}

static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}

static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}

static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_bytes_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}

/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 * This limit is calculated by subtracting a fraction of the hard
 * limit from the hard limit. By default this fraction is 5%, so
 * the soft limit is 95% of the hard limit. We cap the size of the
 * difference between the hard and soft limits at an absolute
 * maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 * sufficient to not cause too frequent switching between the
 * metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 * worth of queues is about 1.2 GiB of on-pool data, so scanning
 * that should take at least a decent fraction of a second).
 */
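/*
 * Worked example (illustrative numbers only, not from the original
 * source): on a machine with 8 GiB of physmem and the default tunables,
 * mlim_hard = MAX(8 GiB / 20, 16 MiB) ~= 410 MiB, further capped at 5%
 * of the pool's allocated space, and mlim_soft = mlim_hard -
 * MIN(mlim_hard / 20, 128 MiB) ~= 389 MiB.
 */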
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t mlim_hard, mlim_soft, mused;
	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
	    scn->scn_dp->dp_spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/* #extents in exts_by_size = # in exts_by_addr */
			mused += avl_numnodes(&queue->q_exts_by_size) *
			    sizeof (range_seg_t) +
			    avl_numnodes(&queue->q_sios_by_addr) *
			    sizeof (scan_io_t);
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_bytes_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}

static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
#ifdef ZFS_DEBUG
			dsl_scan_phys_t *scnp = &scn->scn_phys;
			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
		}
		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

1279 | ||
1280 | /* ARGSUSED */ | |
1281 | static int | |
1282 | dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) | |
1283 | { | |
1284 | zil_scan_arg_t *zsa = arg; | |
1285 | dsl_pool_t *dp = zsa->zsa_dp; | |
1286 | dsl_scan_t *scn = dp->dp_scan; | |
1287 | zil_header_t *zh = zsa->zsa_zh; | |
5dbd68a3 | 1288 | zbookmark_phys_t zb; |
428870ff | 1289 | |
b0bc7a84 | 1290 | if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) |
428870ff BB |
1291 | return (0); |
1292 | ||
1293 | /* | |
1294 | * One block ("stubby") can be allocated a long time ago; we | |
1295 | * want to visit that one because it has been allocated | |
1296 | * (on-disk) even if it hasn't been claimed (even though for | |
1297 | * scrub there's nothing to do to it). | |
1298 | */ | |
d2734cce | 1299 | if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa)) |
428870ff BB |
1300 | return (0); |
1301 | ||
1302 | SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], | |
1303 | ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); | |
1304 | ||
1305 | VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); | |
1306 | return (0); | |
1307 | } | |
1308 | ||
/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

1343 | static void | |
1344 | dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh) | |
1345 | { | |
1346 | uint64_t claim_txg = zh->zh_claim_txg; | |
1347 | zil_scan_arg_t zsa = { dp, zh }; | |
1348 | zilog_t *zilog; | |
1349 | ||
d2734cce SD |
1350 | ASSERT(spa_writeable(dp->dp_spa)); |
1351 | ||
428870ff BB |
1352 | /* |
1353 | * We only want to visit blocks that have been claimed but not yet | |
1354 | * replayed (or, in read-only mode, blocks that *would* be claimed). | |
1355 | */ | |
d2734cce | 1356 | if (claim_txg == 0) |
428870ff BB |
1357 | return; |
1358 | ||
1359 | zilog = zil_alloc(dp->dp_meta_objset, zh); | |
1360 | ||
1361 | (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa, | |
b5256303 | 1362 | claim_txg, B_FALSE); |
428870ff BB |
1363 | |
1364 | zil_free(zilog); | |
1365 | } | |
1366 | ||
d4a72f23 TC |
1367 | /* |
1368 | * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea | |
1369 | * here is to sort the AVL tree in the order each block will be needed. | 
1370 | */ | |
1371 | static int | |
1372 | scan_prefetch_queue_compare(const void *a, const void *b) | |
428870ff | 1373 | { |
d4a72f23 TC |
1374 | const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b; |
1375 | const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc; | |
1376 | const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc; | |
428870ff | 1377 | |
d4a72f23 TC |
1378 | return (zbookmark_compare(spc_a->spc_datablkszsec, |
1379 | spc_a->spc_indblkshift, spc_b->spc_datablkszsec, | |
1380 | spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb)); | |
1381 | } | |
428870ff | 1382 | |
d4a72f23 TC |
1383 | static void |
1384 | scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag) | |
1385 | { | |
424fd7c3 TS |
1386 | if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) { |
1387 | zfs_refcount_destroy(&spc->spc_refcnt); | |
d4a72f23 TC |
1388 | kmem_free(spc, sizeof (scan_prefetch_ctx_t)); |
1389 | } | |
1390 | } | |
1391 | ||
1392 | static scan_prefetch_ctx_t * | |
1393 | scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag) | |
1394 | { | |
1395 | scan_prefetch_ctx_t *spc; | |
1396 | ||
1397 | spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP); | |
424fd7c3 | 1398 | zfs_refcount_create(&spc->spc_refcnt); |
c13060e4 | 1399 | zfs_refcount_add(&spc->spc_refcnt, tag); |
d4a72f23 TC |
1400 | spc->spc_scn = scn; |
1401 | if (dnp != NULL) { | |
1402 | spc->spc_datablkszsec = dnp->dn_datablkszsec; | |
1403 | spc->spc_indblkshift = dnp->dn_indblkshift; | |
1404 | spc->spc_root = B_FALSE; | |
1405 | } else { | |
1406 | spc->spc_datablkszsec = 0; | |
1407 | spc->spc_indblkshift = 0; | |
1408 | spc->spc_root = B_TRUE; | |
1409 | } | |
1410 | ||
1411 | return (spc); | |
1412 | } | |
1413 | ||
1414 | static void | |
1415 | scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag) | |
1416 | { | |
c13060e4 | 1417 | zfs_refcount_add(&spc->spc_refcnt, tag); |
d4a72f23 TC |
1418 | } |
1419 | ||
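/*
 * Discard all entries still sitting in the scan prefetch queue,
 * releasing the prefetch context reference held by each one.
 */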
d6496040 TC |
1420 | static void |
1421 | scan_ds_prefetch_queue_clear(dsl_scan_t *scn) | |
1422 | { | |
1423 | spa_t *spa = scn->scn_dp->dp_spa; | |
1424 | void *cookie = NULL; | |
1425 | scan_prefetch_issue_ctx_t *spic = NULL; | |
1426 | ||
1427 | mutex_enter(&spa->spa_scrub_lock); | |
1428 | while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue, | |
1429 | &cookie)) != NULL) { | |
1430 | scan_prefetch_ctx_rele(spic->spic_spc, scn); | |
1431 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1432 | } | |
1433 | mutex_exit(&spa->spa_scrub_lock); | |
1434 | } | |
1435 | ||
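/*
 * Decide whether the block at *zb still needs to be prefetched when
 * resuming a suspended scan. Returns B_TRUE (skip it) if the block is
 * in a different objset or its subtree was completed before the scan
 * suspended; user/group accounting objects (obj < 0) are never skipped.
 */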
d4a72f23 TC |
1436 | static boolean_t |
1437 | dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc, | |
1438 | const zbookmark_phys_t *zb) | |
1439 | { | |
1440 | zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark; | |
1441 | dnode_phys_t tmp_dnp; | |
1442 | dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp; | |
1443 | ||
1444 | if (zb->zb_objset != last_zb->zb_objset) | |
1445 | return (B_TRUE); | |
1446 | if ((int64_t)zb->zb_object < 0) | |
1447 | return (B_FALSE); | |
1448 | ||
1449 | tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec; | |
1450 | tmp_dnp.dn_indblkshift = spc->spc_indblkshift; | |
1451 | ||
1452 | if (zbookmark_subtree_completed(dnp, zb, last_zb)) | |
1453 | return (B_TRUE); | |
1454 | ||
1455 | return (B_FALSE); | |
1456 | } | |
1457 | ||
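/*
 * Queue a single block for prefetching, provided it is worth reading
 * ahead: holes, blocks below the scan's current min txg, and level-0
 * blocks that are neither dnodes nor objsets are ignored.
 */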
1458 | static void | |
1459 | dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) | |
1460 | { | |
1461 | avl_index_t idx; | |
1462 | dsl_scan_t *scn = spc->spc_scn; | |
1463 | spa_t *spa = scn->scn_dp->dp_spa; | |
1464 | scan_prefetch_issue_ctx_t *spic; | |
1465 | ||
1466 | if (zfs_no_scrub_prefetch) | |
1467 | return; | |
1468 | ||
1469 | if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg || | |
1470 | (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && | |
1471 | BP_GET_TYPE(bp) != DMU_OT_OBJSET)) | |
1472 | return; | |
1473 | ||
1474 | if (dsl_scan_check_prefetch_resume(spc, zb)) | |
1475 | return; | |
1476 | ||
1477 | scan_prefetch_ctx_add_ref(spc, scn); | |
1478 | spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP); | |
1479 | spic->spic_spc = spc; | |
1480 | spic->spic_bp = *bp; | |
1481 | spic->spic_zb = *zb; | |
1482 | ||
1483 | /* | |
1484 | * Add the IO to the queue of blocks to prefetch. This allows us to | |
1485 | * prioritize blocks that we will need first for the main traversal | |
1486 | * thread. | |
1487 | */ | |
1488 | mutex_enter(&spa->spa_scrub_lock); | |
1489 | if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) { | |
1490 | /* this block is already queued for prefetch */ | |
1491 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1492 | scan_prefetch_ctx_rele(spc, scn); | |
1493 | mutex_exit(&spa->spa_scrub_lock); | |
1494 | return; | |
1495 | } | |
1496 | ||
1497 | avl_insert(&scn->scn_prefetch_queue, spic, idx); | |
1498 | cv_broadcast(&spa->spa_scrub_io_cv); | |
1499 | mutex_exit(&spa->spa_scrub_lock); | |
1500 | } | |
1501 | ||
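/*
 * Queue prefetches for every block pointer in a dnode, including its
 * spill block when one is present.
 */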
1502 | static void | |
1503 | dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp, | |
1504 | uint64_t objset, uint64_t object) | |
1505 | { | |
1506 | int i; | |
1507 | zbookmark_phys_t zb; | |
1508 | scan_prefetch_ctx_t *spc; | |
1509 | ||
1510 | if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
1511 | return; | |
1512 | ||
1513 | SET_BOOKMARK(&zb, objset, object, 0, 0); | |
1514 | ||
1515 | spc = scan_prefetch_ctx_create(scn, dnp, FTAG); | |
1516 | ||
1517 | for (i = 0; i < dnp->dn_nblkptr; i++) { | |
1518 | zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]); | |
1519 | zb.zb_blkid = i; | |
1520 | dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb); | |
1521 | } | |
1522 | ||
1523 | if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { | |
1524 | zb.zb_level = 0; | |
1525 | zb.zb_blkid = DMU_SPILL_BLKID; | |
1526 | dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb); | |
1527 | } | |
1528 | ||
1529 | scan_prefetch_ctx_rele(spc, FTAG); | |
1530 | } | |
1531 | ||
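/*
 * arc_read() completion callback for prefetch I/Os. On success it
 * recurses into the buffer, queueing prefetches for child block
 * pointers of indirect blocks, dnode blocks, and objset blocks, then
 * drops its hold on the prefetch context.
 */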
1532 | void | |
1533 | dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, | |
1534 | arc_buf_t *buf, void *private) | |
1535 | { | |
1536 | scan_prefetch_ctx_t *spc = private; | |
1537 | dsl_scan_t *scn = spc->spc_scn; | |
1538 | spa_t *spa = scn->scn_dp->dp_spa; | |
1539 | ||
13a2ff27 | 1540 | /* broadcast that the IO has completed for rate limiting purposes */ |
d4a72f23 TC |
1541 | mutex_enter(&spa->spa_scrub_lock); |
1542 | ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); | |
1543 | spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); | |
1544 | cv_broadcast(&spa->spa_scrub_io_cv); | |
1545 | mutex_exit(&spa->spa_scrub_lock); | |
1546 | ||
1547 | /* if there was an error or we are done prefetching, just cleanup */ | |
13a2ff27 | 1548 | if (buf == NULL || scn->scn_prefetch_stop) |
d4a72f23 TC |
1549 | goto out; |
1550 | ||
1551 | if (BP_GET_LEVEL(bp) > 0) { | |
1552 | int i; | |
1553 | blkptr_t *cbp; | |
1554 | int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; | |
1555 | zbookmark_phys_t czb; | |
1556 | ||
1557 | for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { | |
1558 | SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, | |
1559 | zb->zb_level - 1, zb->zb_blkid * epb + i); | |
1560 | dsl_scan_prefetch(spc, cbp, &czb); | |
1561 | } | |
1562 | } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { | |
1563 | dnode_phys_t *cdnp; | |
1564 | int i; | |
1565 | int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; | |
1566 | ||
1567 | for (i = 0, cdnp = buf->b_data; i < epb; | |
1568 | i += cdnp->dn_extra_slots + 1, | |
1569 | cdnp += cdnp->dn_extra_slots + 1) { | |
1570 | dsl_scan_prefetch_dnode(scn, cdnp, | |
1571 | zb->zb_objset, zb->zb_blkid * epb + i); | |
1572 | } | |
1573 | } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { | |
1574 | objset_phys_t *osp = buf->b_data; | |
1575 | ||
1576 | dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode, | |
1577 | zb->zb_objset, DMU_META_DNODE_OBJECT); | |
1578 | ||
1579 | if (OBJSET_BUF_HAS_USERUSED(buf)) { | |
1580 | dsl_scan_prefetch_dnode(scn, | |
1581 | &osp->os_groupused_dnode, zb->zb_objset, | |
1582 | DMU_GROUPUSED_OBJECT); | |
1583 | dsl_scan_prefetch_dnode(scn, | |
1584 | &osp->os_userused_dnode, zb->zb_objset, | |
1585 | DMU_USERUSED_OBJECT); | |
1586 | } | |
1587 | } | |
1588 | ||
1589 | out: | |
1590 | if (buf != NULL) | |
1591 | arc_buf_destroy(buf, private); | |
1592 | scan_prefetch_ctx_rele(spc, scn); | |
1593 | } | |
1594 | ||
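/*
 * Body of the dedicated prefetch thread: waits until the queue is
 * non-empty and the in-flight byte limit permits, then issues the
 * highest-priority entry as an asynchronous arc_read(). On shutdown it
 * drains and frees whatever remains queued.
 */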
1595 | /* ARGSUSED */ | |
1596 | static void | |
1597 | dsl_scan_prefetch_thread(void *arg) | |
1598 | { | |
1599 | dsl_scan_t *scn = arg; | |
1600 | spa_t *spa = scn->scn_dp->dp_spa; | |
1601 | scan_prefetch_issue_ctx_t *spic; | |
1602 | ||
1603 | /* loop until we are told to stop */ | |
1604 | while (!scn->scn_prefetch_stop) { | |
1605 | arc_flags_t flags = ARC_FLAG_NOWAIT | | |
1606 | ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH; | |
1607 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; | |
1608 | ||
1609 | mutex_enter(&spa->spa_scrub_lock); | |
1610 | ||
1611 | /* | |
1612 | * Wait until we have an IO to issue and are not above our | |
1613 | * maximum in flight limit. | |
1614 | */ | |
1615 | while (!scn->scn_prefetch_stop && | |
1616 | (avl_numnodes(&scn->scn_prefetch_queue) == 0 || | |
1617 | spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) { | |
1618 | cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); | |
1619 | } | |
1620 | ||
1621 | /* recheck if we should stop since we waited for the cv */ | |
1622 | if (scn->scn_prefetch_stop) { | |
1623 | mutex_exit(&spa->spa_scrub_lock); | |
1624 | break; | |
1625 | } | |
1626 | ||
1627 | /* remove the prefetch IO from the tree */ | |
1628 | spic = avl_first(&scn->scn_prefetch_queue); | |
1629 | spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp); | |
1630 | avl_remove(&scn->scn_prefetch_queue, spic); | |
1631 | ||
1632 | mutex_exit(&spa->spa_scrub_lock); | |
1633 | ||
1634 | if (BP_IS_PROTECTED(&spic->spic_bp)) { | |
1635 | ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE || | |
1636 | BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET); | |
1637 | ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0); | |
1638 | zio_flags |= ZIO_FLAG_RAW; | |
1639 | } | |
1640 | ||
1641 | /* issue the prefetch asynchronously */ | |
1642 | (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, | |
1643 | &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc, | |
a8b2e306 | 1644 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb); |
428870ff | 1645 | |
d4a72f23 | 1646 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); |
b5256303 TC |
1647 | } |
1648 | ||
d4a72f23 | 1649 | ASSERT(scn->scn_prefetch_stop); |
428870ff | 1650 | |
d4a72f23 TC |
1651 | /* free any prefetches we didn't get to complete */ |
1652 | mutex_enter(&spa->spa_scrub_lock); | |
1653 | while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) { | |
1654 | avl_remove(&scn->scn_prefetch_queue, spic); | |
1655 | scan_prefetch_ctx_rele(spic->spic_spc, scn); | |
1656 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1657 | } | |
1658 | ASSERT0(avl_numnodes(&scn->scn_prefetch_queue)); | |
1659 | mutex_exit(&spa->spa_scrub_lock); | |
428870ff BB |
1660 | } |
1661 | ||
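/*
 * When resuming from a saved bookmark, returns B_TRUE if the subtree at
 * *zb was already fully visited in a prior txg sync and may be skipped.
 */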
1662 | static boolean_t | |
1663 | dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp, | |
5dbd68a3 | 1664 | const zbookmark_phys_t *zb) |
428870ff BB |
1665 | { |
1666 | /* | |
1667 | * We never skip over user/group accounting objects (obj<0) | |
1668 | */ | |
9ae529ec | 1669 | if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) && |
428870ff BB |
1670 | (int64_t)zb->zb_object >= 0) { |
1671 | /* | |
1672 | * If we already visited this bp & everything below (in | |
1673 | * a prior txg sync), don't bother doing it again. | |
1674 | */ | |
fcff0f35 PD |
1675 | if (zbookmark_subtree_completed(dnp, zb, |
1676 | &scn->scn_phys.scn_bookmark)) | |
428870ff BB |
1677 | return (B_TRUE); |
1678 | ||
1679 | /* | |
1680 | * If we found the block we're trying to resume from, or | |
1681 | * we went past it to a different object, zero it out to | |
0ea05c64 | 1682 | * indicate that it's OK to start checking for suspending |
428870ff BB |
1683 | * again. |
1684 | */ | |
1685 | if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 || | |
1686 | zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) { | |
1687 | dprintf("resuming at %llx/%llx/%llx/%llx\n", | |
1688 | (longlong_t)zb->zb_objset, | |
1689 | (longlong_t)zb->zb_object, | |
1690 | (longlong_t)zb->zb_level, | |
1691 | (longlong_t)zb->zb_blkid); | |
1692 | bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb)); | |
1693 | } | |
1694 | } | |
1695 | return (B_FALSE); | |
1696 | } | |
1697 | ||
d4a72f23 TC |
1698 | static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, |
1699 | dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, | |
1700 | dmu_objset_type_t ostype, dmu_tx_t *tx); | |
1701 | inline __attribute__((always_inline)) static void dsl_scan_visitdnode( | |
1702 | dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype, | |
1703 | dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx); | |
1704 | ||
428870ff BB |
1705 | /* |
1706 | * Return nonzero on i/o error; the error is counted in | 
1707 | * scn_phys.scn_errors and the block's subtree is skipped. | 
1708 | */ | |
10be533e | 1709 | inline __attribute__((always_inline)) static int |
428870ff BB |
1710 | dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, |
1711 | dnode_phys_t *dnp, const blkptr_t *bp, | |
ebcf4936 | 1712 | const zbookmark_phys_t *zb, dmu_tx_t *tx) |
428870ff BB |
1713 | { |
1714 | dsl_pool_t *dp = scn->scn_dp; | |
572e2857 | 1715 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; |
428870ff BB |
1716 | int err; |
1717 | ||
1718 | if (BP_GET_LEVEL(bp) > 0) { | |
2a432414 | 1719 | arc_flags_t flags = ARC_FLAG_WAIT; |
428870ff BB |
1720 | int i; |
1721 | blkptr_t *cbp; | |
1722 | int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; | |
ebcf4936 | 1723 | arc_buf_t *buf; |
428870ff | 1724 | |
ebcf4936 | 1725 | err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf, |
a8b2e306 | 1726 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); |
428870ff BB |
1727 | if (err) { |
1728 | scn->scn_phys.scn_errors++; | |
1729 | return (err); | |
1730 | } | |
ebcf4936 | 1731 | for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { |
5dbd68a3 | 1732 | zbookmark_phys_t czb; |
428870ff BB |
1733 | |
1734 | SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, | |
1735 | zb->zb_level - 1, | |
1736 | zb->zb_blkid * epb + i); | |
1737 | dsl_scan_visitbp(cbp, &czb, dnp, | |
ebcf4936 | 1738 | ds, scn, ostype, tx); |
428870ff | 1739 | } |
d3c2ae1c | 1740 | arc_buf_destroy(buf, &buf); |
428870ff | 1741 | } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { |
2a432414 | 1742 | arc_flags_t flags = ARC_FLAG_WAIT; |
428870ff | 1743 | dnode_phys_t *cdnp; |
d4a72f23 | 1744 | int i; |
428870ff | 1745 | int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; |
ebcf4936 | 1746 | arc_buf_t *buf; |
428870ff | 1747 | |
b5256303 TC |
1748 | if (BP_IS_PROTECTED(bp)) { |
1749 | ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); | |
1750 | zio_flags |= ZIO_FLAG_RAW; | |
1751 | } | |
1752 | ||
ebcf4936 | 1753 | err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf, |
a8b2e306 | 1754 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); |
428870ff BB |
1755 | if (err) { |
1756 | scn->scn_phys.scn_errors++; | |
1757 | return (err); | |
1758 | } | |
50c957f7 NB |
1759 | for (i = 0, cdnp = buf->b_data; i < epb; |
1760 | i += cdnp->dn_extra_slots + 1, | |
1761 | cdnp += cdnp->dn_extra_slots + 1) { | |
428870ff | 1762 | dsl_scan_visitdnode(scn, ds, ostype, |
ebcf4936 | 1763 | cdnp, zb->zb_blkid * epb + i, tx); |
428870ff BB |
1764 | } |
1765 | ||
d3c2ae1c | 1766 | arc_buf_destroy(buf, &buf); |
428870ff | 1767 | } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { |
2a432414 | 1768 | arc_flags_t flags = ARC_FLAG_WAIT; |
428870ff | 1769 | objset_phys_t *osp; |
ebcf4936 | 1770 | arc_buf_t *buf; |
428870ff | 1771 | |
ebcf4936 | 1772 | err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf, |
a8b2e306 | 1773 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); |
428870ff BB |
1774 | if (err) { |
1775 | scn->scn_phys.scn_errors++; | |
1776 | return (err); | |
1777 | } | |
1778 | ||
ebcf4936 | 1779 | osp = buf->b_data; |
428870ff | 1780 | |
428870ff | 1781 | dsl_scan_visitdnode(scn, ds, osp->os_type, |
ebcf4936 | 1782 | &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx); |
428870ff | 1783 | |
ebcf4936 | 1784 | if (OBJSET_BUF_HAS_USERUSED(buf)) { |
428870ff | 1785 | /* |
9c5167d1 | 1786 | * We also always visit user/group/project accounting |
428870ff | 1787 | * objects, and never skip them, even if we are |
d4a72f23 TC |
1788 | * suspending. This is necessary so that the |
1789 | * space deltas from this txg get integrated. | |
428870ff | 1790 | */ |
9c5167d1 NF |
1791 | if (OBJSET_BUF_HAS_PROJECTUSED(buf)) |
1792 | dsl_scan_visitdnode(scn, ds, osp->os_type, | |
1793 | &osp->os_projectused_dnode, | |
1794 | DMU_PROJECTUSED_OBJECT, tx); | |
428870ff | 1795 | dsl_scan_visitdnode(scn, ds, osp->os_type, |
ebcf4936 | 1796 | &osp->os_groupused_dnode, |
428870ff BB |
1797 | DMU_GROUPUSED_OBJECT, tx); |
1798 | dsl_scan_visitdnode(scn, ds, osp->os_type, | |
ebcf4936 | 1799 | &osp->os_userused_dnode, |
428870ff BB |
1800 | DMU_USERUSED_OBJECT, tx); |
1801 | } | |
d3c2ae1c | 1802 | arc_buf_destroy(buf, &buf); |
428870ff BB |
1803 | } |
1804 | ||
1805 | return (0); | |
1806 | } | |
1807 | ||
10be533e | 1808 | inline __attribute__((always_inline)) static void |
428870ff | 1809 | dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds, |
ebcf4936 | 1810 | dmu_objset_type_t ostype, dnode_phys_t *dnp, |
428870ff BB |
1811 | uint64_t object, dmu_tx_t *tx) |
1812 | { | |
1813 | int j; | |
1814 | ||
1815 | for (j = 0; j < dnp->dn_nblkptr; j++) { | |
5dbd68a3 | 1816 | zbookmark_phys_t czb; |
428870ff BB |
1817 | |
1818 | SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, | |
1819 | dnp->dn_nlevels - 1, j); | |
1820 | dsl_scan_visitbp(&dnp->dn_blkptr[j], | |
ebcf4936 | 1821 | &czb, dnp, ds, scn, ostype, tx); |
428870ff BB |
1822 | } |
1823 | ||
1824 | if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { | |
5dbd68a3 | 1825 | zbookmark_phys_t czb; |
428870ff BB |
1826 | SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, |
1827 | 0, DMU_SPILL_BLKID); | |
50c957f7 | 1828 | dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp), |
ebcf4936 | 1829 | &czb, dnp, ds, scn, ostype, tx); |
428870ff BB |
1830 | } |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * The arguments are in this order because mdb can only print the | |
1835 | * first 5; we want them to be useful. | |
1836 | */ | |
1837 | static void | |
5dbd68a3 | 1838 | dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, |
ebcf4936 MA |
1839 | dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, |
1840 | dmu_objset_type_t ostype, dmu_tx_t *tx) | |
428870ff BB |
1841 | { |
1842 | dsl_pool_t *dp = scn->scn_dp; | |
d4a72f23 | 1843 | blkptr_t *bp_toread = NULL; |
428870ff | 1844 | |
0ea05c64 | 1845 | if (dsl_scan_check_suspend(scn, zb)) |
d4a72f23 | 1846 | return; |
428870ff BB |
1847 | |
1848 | if (dsl_scan_check_resume(scn, dnp, zb)) | |
d4a72f23 | 1849 | return; |
428870ff BB |
1850 | |
1851 | scn->scn_visited_this_txg++; | |
1852 | ||
b81c4ac9 BB |
1853 | /* |
1854 | * This debugging is commented out to conserve stack space. This | |
1855 | * function is called recursively and the debugging adds several | 
1856 | * bytes to the stack for each call. It can be commented back in | |
1857 | * if required to debug an issue in dsl_scan_visitbp(). | |
1858 | * | |
1859 | * dprintf_bp(bp, | |
d4a72f23 TC |
1860 | * "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p", |
1861 | * ds, ds ? ds->ds_object : 0, | |
1862 | * zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid, | |
1863 | * bp); | |
b81c4ac9 | 1864 | */ |
428870ff | 1865 | |
d4a72f23 TC |
1866 | if (BP_IS_HOLE(bp)) { |
1867 | scn->scn_holes_this_txg++; | |
1868 | return; | |
1869 | } | |
1870 | ||
1871 | if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) { | |
1872 | scn->scn_lt_min_this_txg++; | |
1873 | return; | |
1874 | } | |
1875 | ||
1876 | bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); | |
1877 | *bp_toread = *bp; | |
428870ff | 1878 | |
ebcf4936 | 1879 | if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0) |
161ce7ce | 1880 | goto out; |
428870ff BB |
1881 | |
1882 | /* | |
4e33ba4c | 1883 | * If dsl_scan_ddt() has already visited this block, it will have |
428870ff BB |
1884 | * already done any translations or scrubbing, so don't call the |
1885 | * callback again. | |
1886 | */ | |
1887 | if (ddt_class_contains(dp->dp_spa, | |
1888 | scn->scn_phys.scn_ddt_class_max, bp)) { | |
d4a72f23 | 1889 | scn->scn_ddt_contained_this_txg++; |
161ce7ce | 1890 | goto out; |
428870ff BB |
1891 | } |
1892 | ||
1893 | /* | |
1894 | * If this block is from the future (after cur_max_txg), then we | |
1895 | * are doing this on behalf of a deleted snapshot, and we will | |
1896 | * revisit the future block on the next pass of this dataset. | |
1897 | * Don't scan it now unless we need to because something | |
1898 | * under it was modified. | |
1899 | */ | |
d4a72f23 TC |
1900 | if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { |
1901 | scn->scn_gt_max_this_txg++; | |
1902 | goto out; | |
428870ff | 1903 | } |
d4a72f23 TC |
1904 | |
1905 | scan_funcs[scn->scn_phys.scn_func](dp, bp, zb); | |
1906 | ||
161ce7ce | 1907 | out: |
d1d7e268 | 1908 | kmem_free(bp_toread, sizeof (blkptr_t)); |
428870ff BB |
1909 | } |
1910 | ||
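/*
 * Begin traversal of a dataset (or of the MOS when ds is NULL) from its
 * root block pointer: position the prefetch bookmark, queue the root
 * block for prefetch, then visit it synchronously.
 */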
1911 | static void | |
1912 | dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp, | |
1913 | dmu_tx_t *tx) | |
1914 | { | |
5dbd68a3 | 1915 | zbookmark_phys_t zb; |
d4a72f23 | 1916 | scan_prefetch_ctx_t *spc; |
428870ff BB |
1917 | |
1918 | SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, | |
1919 | ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); | |
d4a72f23 TC |
1920 | |
1921 | if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) { | |
1922 | SET_BOOKMARK(&scn->scn_prefetch_bookmark, | |
1923 | zb.zb_objset, 0, 0, 0); | |
1924 | } else { | |
1925 | scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark; | |
1926 | } | |
1927 | ||
1928 | scn->scn_objsets_visited_this_txg++; | |
1929 | ||
1930 | spc = scan_prefetch_ctx_create(scn, NULL, FTAG); | |
1931 | dsl_scan_prefetch(spc, bp, &zb); | |
1932 | scan_prefetch_ctx_rele(spc, FTAG); | |
1933 | ||
1934 | dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx); | |
428870ff BB |
1935 | |
1936 | dprintf_ds(ds, "finished scan%s", ""); | |
1937 | } | |
1938 | ||
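/*
 * Adjust a dsl_scan_phys_t whose bookmark points into a dataset that is
 * being destroyed: for snapshots the bookmark is redirected to the next
 * snapshot in line, for heads it is reset to ZB_DESTROYED_OBJSET.
 */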
d4a72f23 TC |
1939 | static void |
1940 | ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys) | |
428870ff | 1941 | { |
d4a72f23 | 1942 | if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) { |
0c66c32d | 1943 | if (ds->ds_is_snapshot) { |
b77222c8 MA |
1944 | /* |
1945 | * Note: | |
1946 | * - scn_cur_{min,max}_txg stays the same. | |
1947 | * - Setting the flag is not really necessary if | |
1948 | * scn_cur_max_txg == scn_max_txg, because there | |
1949 | * is nothing after this snapshot that we care | |
1950 | * about. However, we set it anyway and then | |
1951 | * ignore it when we retraverse it in | |
1952 | * dsl_scan_visitds(). | |
1953 | */ | |
d4a72f23 | 1954 | scn_phys->scn_bookmark.zb_objset = |
d683ddbb | 1955 | dsl_dataset_phys(ds)->ds_next_snap_obj; |
428870ff BB |
1956 | zfs_dbgmsg("destroying ds %llu; currently traversing; " |
1957 | "reset zb_objset to %llu", | |
1958 | (u_longlong_t)ds->ds_object, | |
d683ddbb JG |
1959 | (u_longlong_t)dsl_dataset_phys(ds)-> |
1960 | ds_next_snap_obj); | |
d4a72f23 | 1961 | scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN; |
428870ff | 1962 | } else { |
d4a72f23 | 1963 | SET_BOOKMARK(&scn_phys->scn_bookmark, |
428870ff BB |
1964 | ZB_DESTROYED_OBJSET, 0, 0, 0); |
1965 | zfs_dbgmsg("destroying ds %llu; currently traversing; " | |
1966 | "reset bookmark to -1,0,0,0", | |
1967 | (u_longlong_t)ds->ds_object); | |
1968 | } | |
d4a72f23 TC |
1969 | } |
1970 | } | |
1971 | ||
1972 | /* | |
1973 | * Invoked when a dataset is destroyed. We need to make sure that: | |
1974 | * | |
1975 | * 1) If it is the dataset that is currently being scanned, we write | 
1976 | * a new dsl_scan_phys_t and mark the objset reference in it | 
1977 | * as destroyed. | |
1978 | * 2) Remove it from the work queue, if it was present. | |
1979 | * | |
1980 | * If the dataset was actually a snapshot, instead of marking the dataset | |
1981 | * as destroyed, we instead substitute the next snapshot in line. | |
1982 | */ | |
1983 | void | |
1984 | dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx) | |
1985 | { | |
1986 | dsl_pool_t *dp = ds->ds_dir->dd_pool; | |
1987 | dsl_scan_t *scn = dp->dp_scan; | |
1988 | uint64_t mintxg; | |
1989 | ||
1990 | if (!dsl_scan_is_running(scn)) | |
1991 | return; | |
1992 | ||
1993 | ds_destroyed_scn_phys(ds, &scn->scn_phys); | |
1994 | ds_destroyed_scn_phys(ds, &scn->scn_phys_cached); | |
1995 | ||
1996 | if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { | |
1997 | scan_ds_queue_remove(scn, ds->ds_object); | |
1998 | if (ds->ds_is_snapshot) | |
1999 | scan_ds_queue_insert(scn, | |
2000 | dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg); | |
2001 | } | |
2002 | ||
2003 | if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, | |
2004 | ds->ds_object, &mintxg) == 0) { | |
d683ddbb | 2005 | ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1); |
428870ff BB |
2006 | VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, |
2007 | scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); | |
0c66c32d | 2008 | if (ds->ds_is_snapshot) { |
428870ff BB |
2009 | /* |
2010 | * We keep the same mintxg; it could be > | |
2011 | * ds_creation_txg if the previous snapshot was | |
2012 | * deleted too. | |
2013 | */ | |
2014 | VERIFY(zap_add_int_key(dp->dp_meta_objset, | |
2015 | scn->scn_phys.scn_queue_obj, | |
d683ddbb JG |
2016 | dsl_dataset_phys(ds)->ds_next_snap_obj, |
2017 | mintxg, tx) == 0); | |
428870ff BB |
2018 | zfs_dbgmsg("destroying ds %llu; in queue; " |
2019 | "replacing with %llu", | |
2020 | (u_longlong_t)ds->ds_object, | |
d683ddbb JG |
2021 | (u_longlong_t)dsl_dataset_phys(ds)-> |
2022 | ds_next_snap_obj); | |
428870ff BB |
2023 | } else { |
2024 | zfs_dbgmsg("destroying ds %llu; in queue; removing", | |
2025 | (u_longlong_t)ds->ds_object); | |
2026 | } | |
428870ff BB |
2027 | } |
2028 | ||
2029 | /* | |
2030 | * dsl_scan_sync() should be called after this, and should sync | |
2031 | * out our changed state, but just to be safe, do it here. | |
2032 | */ | |
d4a72f23 TC |
2033 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); |
2034 | } | |
2035 | ||
2036 | static void | |
2037 | ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark) | |
2038 | { | |
2039 | if (scn_bookmark->zb_objset == ds->ds_object) { | |
2040 | scn_bookmark->zb_objset = | |
2041 | dsl_dataset_phys(ds)->ds_prev_snap_obj; | |
2042 | zfs_dbgmsg("snapshotting ds %llu; currently traversing; " | |
2043 | "reset zb_objset to %llu", | |
2044 | (u_longlong_t)ds->ds_object, | |
2045 | (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); | |
2046 | } | |
428870ff BB |
2047 | } |
2048 | ||
d4a72f23 TC |
2049 | /* |
2050 | * Called when a dataset is snapshotted. If we were currently traversing | |
2051 | * this snapshot, we reset our bookmark to point at the newly created | |
2052 | * snapshot. We also modify our work queue to remove the old snapshot and | |
2053 | * replace with the new one. | |
2054 | */ | |
428870ff BB |
2055 | void |
2056 | dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) | |
2057 | { | |
2058 | dsl_pool_t *dp = ds->ds_dir->dd_pool; | |
2059 | dsl_scan_t *scn = dp->dp_scan; | |
2060 | uint64_t mintxg; | |
2061 | ||
d4a72f23 | 2062 | if (!dsl_scan_is_running(scn)) |
428870ff BB |
2063 | return; |
2064 | ||
d683ddbb | 2065 | ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); |
428870ff | 2066 | |
d4a72f23 TC |
2067 | ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); |
2068 | ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); | |
2069 | ||
2070 | if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { | |
2071 | scan_ds_queue_remove(scn, ds->ds_object); | |
2072 | scan_ds_queue_insert(scn, | |
2073 | dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); | |
2074 | } | |
2075 | ||
2076 | if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, | |
2077 | ds->ds_object, &mintxg) == 0) { | |
428870ff BB |
2078 | VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, |
2079 | scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); | |
2080 | VERIFY(zap_add_int_key(dp->dp_meta_objset, | |
2081 | scn->scn_phys.scn_queue_obj, | |
d683ddbb | 2082 | dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); |
428870ff BB |
2083 | zfs_dbgmsg("snapshotting ds %llu; in queue; " |
2084 | "replacing with %llu", | |
2085 | (u_longlong_t)ds->ds_object, | |
d683ddbb | 2086 | (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); |
428870ff | 2087 | } |
d4a72f23 TC |
2088 | |
2089 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); | |
428870ff BB |
2090 | } |
2091 | ||
d4a72f23 TC |
2092 | static void |
2093 | ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, | |
2094 | zbookmark_phys_t *scn_bookmark) | |
428870ff | 2095 | { |
d4a72f23 TC |
2096 | if (scn_bookmark->zb_objset == ds1->ds_object) { |
2097 | scn_bookmark->zb_objset = ds2->ds_object; | |
428870ff BB |
2098 | zfs_dbgmsg("clone_swap ds %llu; currently traversing; " |
2099 | "reset zb_objset to %llu", | |
2100 | (u_longlong_t)ds1->ds_object, | |
2101 | (u_longlong_t)ds2->ds_object); | |
d4a72f23 TC |
2102 | } else if (scn_bookmark->zb_objset == ds2->ds_object) { |
2103 | scn_bookmark->zb_objset = ds1->ds_object; | |
428870ff BB |
2104 | zfs_dbgmsg("clone_swap ds %llu; currently traversing; " |
2105 | "reset zb_objset to %llu", | |
2106 | (u_longlong_t)ds2->ds_object, | |
2107 | (u_longlong_t)ds1->ds_object); | |
2108 | } | |
d4a72f23 TC |
2109 | } |
2110 | ||
2111 | /* | |
2112 | * Called when a parent dataset and its clone are swapped. If we were | |
2113 | * currently traversing the dataset, we need to switch to traversing the | |
2114 | * newly promoted parent. | |
2115 | */ | |
2116 | void | |
2117 | dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) | |
2118 | { | |
2119 | dsl_pool_t *dp = ds1->ds_dir->dd_pool; | |
2120 | dsl_scan_t *scn = dp->dp_scan; | |
2121 | uint64_t mintxg; | |
2122 | ||
2123 | if (!dsl_scan_is_running(scn)) | |
2124 | return; | |
2125 | ||
2126 | ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); | |
2127 | ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); | |
2128 | ||
2129 | if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) { | |
2130 | scan_ds_queue_remove(scn, ds1->ds_object); | |
2131 | scan_ds_queue_insert(scn, ds2->ds_object, mintxg); | |
2132 | } | |
2133 | if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) { | |
2134 | scan_ds_queue_remove(scn, ds2->ds_object); | |
2135 | scan_ds_queue_insert(scn, ds1->ds_object, mintxg); | |
2136 | } | |
428870ff BB |
2137 | |
2138 | if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, | |
2139 | ds1->ds_object, &mintxg) == 0) { | |
2140 | int err; | |
d683ddbb JG |
2141 | ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); |
2142 | ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); | |
428870ff BB |
2143 | VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, |
2144 | scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); | |
2145 | err = zap_add_int_key(dp->dp_meta_objset, | |
2146 | scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx); | |
2147 | VERIFY(err == 0 || err == EEXIST); | |
2148 | if (err == EEXIST) { | |
2149 | /* Both were there to begin with */ | |
2150 | VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, | |
2151 | scn->scn_phys.scn_queue_obj, | |
2152 | ds1->ds_object, mintxg, tx)); | |
2153 | } | |
2154 | zfs_dbgmsg("clone_swap ds %llu; in queue; " | |
2155 | "replacing with %llu", | |
2156 | (u_longlong_t)ds1->ds_object, | |
2157 | (u_longlong_t)ds2->ds_object); | |
d4a72f23 TC |
2158 | } |
2159 | if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, | |
2160 | ds2->ds_object, &mintxg) == 0) { | |
d683ddbb JG |
2161 | ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); |
2162 | ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); | |
428870ff BB |
2163 | VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, |
2164 | scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); | |
2165 | VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, | |
2166 | scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx)); | |
2167 | zfs_dbgmsg("clone_swap ds %llu; in queue; " | |
2168 | "replacing with %llu", | |
2169 | (u_longlong_t)ds2->ds_object, | |
2170 | (u_longlong_t)ds1->ds_object); | |
2171 | } | |
2172 | ||
d4a72f23 | 2173 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); |
428870ff BB |
2174 | } |
2175 | ||
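/*
 * dmu_objset_find_dp() callback used to enqueue clones of the origin
 * snapshot passed in arg: each matching clone is walked back to its
 * oldest snapshot descending from that origin, which is then added to
 * the scan's dataset queue.
 */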
428870ff BB |
2176 | /* ARGSUSED */ |
2177 | static int | |
13fe0198 | 2178 | enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) |
428870ff | 2179 | { |
d4a72f23 | 2180 | uint64_t originobj = *(uint64_t *)arg; |
428870ff BB |
2181 | dsl_dataset_t *ds; |
2182 | int err; | |
428870ff BB |
2183 | dsl_scan_t *scn = dp->dp_scan; |
2184 | ||
d4a72f23 | 2185 | if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj) |
13fe0198 MA |
2186 | return (0); |
2187 | ||
2188 | err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); | |
428870ff BB |
2189 | if (err) |
2190 | return (err); | |
2191 | ||
d4a72f23 | 2192 | while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) { |
13fe0198 MA |
2193 | dsl_dataset_t *prev; |
2194 | err = dsl_dataset_hold_obj(dp, | |
d683ddbb | 2195 | dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); |
428870ff | 2196 | |
13fe0198 MA |
2197 | dsl_dataset_rele(ds, FTAG); |
2198 | if (err) | |
2199 | return (err); | |
2200 | ds = prev; | |
428870ff | 2201 | } |
d4a72f23 TC |
2202 | scan_ds_queue_insert(scn, ds->ds_object, |
2203 | dsl_dataset_phys(ds)->ds_prev_snap_txg); | |
428870ff BB |
2204 | dsl_dataset_rele(ds, FTAG); |
2205 | return (0); | |
2206 | } | |
2207 | ||
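/*
 * Scan a single dataset: traverse its ZIL (for head datasets), visit
 * the tree below its root block pointer, and enqueue any descendant
 * snapshots and clones for later passes.
 */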
2208 | static void | |
2209 | dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) | |
2210 | { | |
2211 | dsl_pool_t *dp = scn->scn_dp; | |
2212 | dsl_dataset_t *ds; | |
2213 | ||
2214 | VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); | |
2215 | ||
b77222c8 MA |
2216 | if (scn->scn_phys.scn_cur_min_txg >= |
2217 | scn->scn_phys.scn_max_txg) { | |
2218 | /* | |
2219 | * This can happen if this snapshot was created after the | |
2220 | * scan started, and we already completed a previous snapshot | |
2221 | * that was created after the scan started. This snapshot | |
2222 | * only references blocks with: | |
2223 | * | |
2224 | * birth < our ds_creation_txg | |
2225 | * cur_min_txg is no less than ds_creation_txg. | |
2226 | * We have already visited these blocks. | |
2227 | * or | |
2228 | * birth > scn_max_txg | |
2229 | * The scan requested not to visit these blocks. | |
2230 | * | |
2231 | * Subsequent snapshots (and clones) can reference our | |
2232 | * blocks, or blocks with even higher birth times. | |
2233 | * Therefore we do not need to visit them either, | |
2234 | * so we do not add them to the work queue. | |
2235 | * | |
2236 | * Note that checking for cur_min_txg >= cur_max_txg | |
2237 | * is not sufficient, because in that case we may need to | |
2238 | * visit subsequent snapshots. This happens when min_txg > 0, | |
2239 | * which raises cur_min_txg. In this case we will visit | |
2240 | * this dataset but skip all of its blocks, because the | |
2241 | * rootbp's birth time is < cur_min_txg. Then we will | |
2242 | * add the next snapshots/clones to the work queue. | |
2243 | */ | |
eca7b760 | 2244 | char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); |
b77222c8 MA |
2245 | dsl_dataset_name(ds, dsname); |
2246 | zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because " | |
2247 | "cur_min_txg (%llu) >= max_txg (%llu)", | |
d4a72f23 TC |
2248 | (longlong_t)dsobj, dsname, |
2249 | (longlong_t)scn->scn_phys.scn_cur_min_txg, | |
2250 | (longlong_t)scn->scn_phys.scn_max_txg); | |
b77222c8 MA |
2251 | kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN); | 
2252 | ||
2253 | goto out; | |
2254 | } | |
2255 | ||
572e2857 | 2256 | /* |
a1d477c2 | 2257 | * Only the ZIL in the head (non-snapshot) is valid. Even though |
572e2857 | 2258 | * snapshots can have ZIL block pointers (which may be the same |
a1d477c2 MA |
2259 | * BP as in the head), they must be ignored. In addition, $ORIGIN |
2260 | * doesn't have an objset (i.e. its ds_bp is a hole) so we don't | 
2261 | * need to look for a ZIL in it either. So we traverse the ZIL here, | |
2262 | * rather than in scan_recurse(), because the regular snapshot | |
2263 | * block-sharing rules don't apply to it. | |
572e2857 | 2264 | */ |
a1d477c2 | 2265 | if (!dsl_dataset_is_snapshot(ds) && |
5e097c67 MA |
2266 | (dp->dp_origin_snap == NULL || |
2267 | ds->ds_dir != dp->dp_origin_snap->ds_dir)) { | |
a1d477c2 MA |
2268 | objset_t *os; |
2269 | if (dmu_objset_from_ds(ds, &os) != 0) { | |
2270 | goto out; | |
2271 | } | |
572e2857 | 2272 | dsl_scan_zil(dp, &os->os_zil_header); |
a1d477c2 | 2273 | } |
572e2857 | 2274 | |
428870ff BB |
2275 | /* |
2276 | * Iterate over the bps in this ds. | |
2277 | */ | |
2278 | dmu_buf_will_dirty(ds->ds_dbuf, tx); | |
cc9bb3e5 | 2279 | rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); |
d683ddbb | 2280 | dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx); |
cc9bb3e5 | 2281 | rrw_exit(&ds->ds_bp_rwlock, FTAG); |
428870ff | 2282 | |
1c27024e | 2283 | char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); |
428870ff BB |
2284 | dsl_dataset_name(ds, dsname); |
2285 | zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; " | |
0ea05c64 | 2286 | "suspending=%u", |
428870ff BB |
2287 | (longlong_t)dsobj, dsname, |
2288 | (longlong_t)scn->scn_phys.scn_cur_min_txg, | |
2289 | (longlong_t)scn->scn_phys.scn_cur_max_txg, | |
0ea05c64 | 2290 | (int)scn->scn_suspending); |
eca7b760 | 2291 | kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN); |
428870ff | 2292 | |
0ea05c64 | 2293 | if (scn->scn_suspending) |
428870ff BB |
2294 | goto out; |
2295 | ||
2296 | /* | |
2297 | * We've finished this pass over this dataset. | |
2298 | */ | |
2299 | ||
2300 | /* | |
2301 | * If we did not completely visit this dataset, do another pass. | |
2302 | */ | |
2303 | if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) { | |
2304 | zfs_dbgmsg("incomplete pass; visiting again"); | |
2305 | scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN; | |
d4a72f23 TC |
2306 | scan_ds_queue_insert(scn, ds->ds_object, |
2307 | scn->scn_phys.scn_cur_max_txg); | |
428870ff BB |
2308 | goto out; |
2309 | } | |
2310 | ||
2311 | /* | |
13a2ff27 | 2312 | * Add descendant datasets to the work queue. | 
428870ff | 2313 | */ |
d683ddbb | 2314 | if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) { |
d4a72f23 | 2315 | scan_ds_queue_insert(scn, |
d683ddbb | 2316 | dsl_dataset_phys(ds)->ds_next_snap_obj, |
d4a72f23 | 2317 | dsl_dataset_phys(ds)->ds_creation_txg); |
428870ff | 2318 | } |
d683ddbb | 2319 | if (dsl_dataset_phys(ds)->ds_num_children > 1) { |
428870ff | 2320 | boolean_t usenext = B_FALSE; |
d683ddbb | 2321 | if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) { |
428870ff BB |
2322 | uint64_t count; |
2323 | /* | |
2324 | * A bug in a previous version of the code could | |
2325 | * cause upgrade_clones_cb() to not set | |
2326 | * ds_next_snap_obj when it should, leading to a | |
2327 | * missing entry. Therefore we can only use the | |
2328 | * next_clones_obj when its count is correct. | |
2329 | */ | |
2330 | int err = zap_count(dp->dp_meta_objset, | |
d683ddbb | 2331 | dsl_dataset_phys(ds)->ds_next_clones_obj, &count); |
428870ff | 2332 | if (err == 0 && |
d683ddbb | 2333 | count == dsl_dataset_phys(ds)->ds_num_children - 1) |
428870ff BB |
2334 | usenext = B_TRUE; |
2335 | } | |
2336 | ||
2337 | if (usenext) { | |
d4a72f23 TC |
2338 | zap_cursor_t zc; |
2339 | zap_attribute_t za; | |
2340 | for (zap_cursor_init(&zc, dp->dp_meta_objset, | |
2341 | dsl_dataset_phys(ds)->ds_next_clones_obj); | |
2342 | zap_cursor_retrieve(&zc, &za) == 0; | |
2343 | (void) zap_cursor_advance(&zc)) { | |
2344 | scan_ds_queue_insert(scn, | |
2345 | zfs_strtonum(za.za_name, NULL), | |
2346 | dsl_dataset_phys(ds)->ds_creation_txg); | |
2347 | } | |
2348 | zap_cursor_fini(&zc); | |
428870ff | 2349 | } else { |
13fe0198 | 2350 | VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, |
d4a72f23 TC |
2351 | enqueue_clones_cb, &ds->ds_object, |
2352 | DS_FIND_CHILDREN)); | |
428870ff BB |
2353 | } |
2354 | } | |
2355 | ||
2356 | out: | |
2357 | dsl_dataset_rele(ds, FTAG); | |
2358 | } | |
2359 | ||
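/*
 * dmu_objset_find_dp() callback that walks a filesystem back through
 * its snapshot chain, stopping at a clone boundary, and enqueues the
 * oldest snapshot found for scanning.
 */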
2360 | /* ARGSUSED */ | |
2361 | static int | |
13fe0198 | 2362 | enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) |
428870ff | 2363 | { |
428870ff BB |
2364 | dsl_dataset_t *ds; |
2365 | int err; | |
428870ff BB |
2366 | dsl_scan_t *scn = dp->dp_scan; |
2367 | ||
13fe0198 | 2368 | err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); |
428870ff BB |
2369 | if (err) |
2370 | return (err); | |
2371 | ||
d683ddbb | 2372 | while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) { |
428870ff | 2373 | dsl_dataset_t *prev; |
d683ddbb JG |
2374 | err = dsl_dataset_hold_obj(dp, |
2375 | dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); | |
428870ff BB |
2376 | if (err) { |
2377 | dsl_dataset_rele(ds, FTAG); | |
2378 | return (err); | |
2379 | } | |
2380 | ||
2381 | /* | |
2382 | * If this is a clone, we don't need to worry about it for now. | |
2383 | */ | |
d683ddbb | 2384 | if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) { |
428870ff BB |
2385 | dsl_dataset_rele(ds, FTAG); |
2386 | dsl_dataset_rele(prev, FTAG); | |
2387 | return (0); | |
2388 | } | |
2389 | dsl_dataset_rele(ds, FTAG); | |
2390 | ds = prev; | |
2391 | } | |
2392 | ||
d4a72f23 TC |
2393 | scan_ds_queue_insert(scn, ds->ds_object, |
2394 | dsl_dataset_phys(ds)->ds_prev_snap_txg); | |
428870ff BB |
2395 | dsl_dataset_rele(ds, FTAG); |
2396 | return (0); | |
2397 | } | |
2398 | ||
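/*
 * Visit one dedup-table entry: a block pointer is reconstructed for
 * each physical variant born within the scan's txg window and handed to
 * the current scan function.
 */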
d4a72f23 TC |
2399 | /* ARGSUSED */ |
2400 | void | |
2401 | dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, | |
2402 | ddt_entry_t *dde, dmu_tx_t *tx) | |
2403 | { | |
2404 | const ddt_key_t *ddk = &dde->dde_key; | |
2405 | ddt_phys_t *ddp = dde->dde_phys; | |
2406 | blkptr_t bp; | |
2407 | zbookmark_phys_t zb = { 0 }; | |
2408 | int p; | |
2409 | ||
f90a30ad | 2410 | if (!dsl_scan_is_running(scn)) |
d4a72f23 TC |
2411 | return; |
2412 | ||
5e0bd0ae TC |
2413 | /* |
2414 | * This function is special because it is the only thing | |
2415 | * that can add scan_io_t's to the vdev scan queues from | |
2416 | * outside dsl_scan_sync(). For the most part this is ok | |
2417 | * as long as it is called from within syncing context. | |
2418 | * However, dsl_scan_sync() expects that no new sio's will | |
2419 | * be added between when all the work for a scan is done | |
2420 | * and the next txg when the scan is actually marked as | |
2421 | * completed. This check ensures we do not issue new sio's | |
2422 | * during this period. | |
2423 | */ | |
2424 | if (scn->scn_done_txg != 0) | |
2425 | return; | |
2426 | ||
d4a72f23 TC |
2427 | for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { |
2428 | if (ddp->ddp_phys_birth == 0 || | |
2429 | ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) | |
2430 | continue; | |
2431 | ddt_bp_create(checksum, ddk, ddp, &bp); | |
2432 | ||
2433 | scn->scn_visited_this_txg++; | |
2434 | scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb); | |
2435 | } | |
2436 | } | |
2437 | ||
428870ff BB |
2438 | /* |
2439 | * Scrub/dedup interaction. | |
2440 | * | |
2441 | * If there are N references to a deduped block, we don't want to scrub it | |
2442 | * N times -- ideally, we should scrub it exactly once. | |
2443 | * | |
2444 | * We leverage the fact that the dde's replication class (enum ddt_class) | |
2445 | * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest | |
2446 | * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order. | |
2447 | * | |
2448 | * To prevent excess scrubbing, the scrub begins by walking the DDT | |
2449 | * to find all blocks with refcnt > 1, and scrubs each of these once. | |
2450 | * Since there are two replication classes which contain blocks with | |
2451 | * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first. | |
2452 | * Finally the top-down scrub begins, only visiting blocks with refcnt == 1. | |
2453 | * | |
2454 | * There would be nothing more to say if a block's refcnt couldn't change | |
2455 | * during a scrub, but of course it can so we must account for changes | |
2456 | * in a block's replication class. | |
2457 | * | |
2458 | * Here's an example of what can occur: | |
2459 | * | |
2460 | * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1 | |
2461 | * when visited during the top-down scrub phase, it will be scrubbed twice. | |
2462 | * This negates our scrub optimization, but is otherwise harmless. | |
2463 | * | |
2464 | * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1 | |
2465 | * on each visit during the top-down scrub phase, it will never be scrubbed. | |
2466 | * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's | |
2467 | * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to | 
2468 | * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1 | |
2469 | * while a scrub is in progress, it scrubs the block right then. | |
2470 | */ | |
2471 | static void | |
2472 | dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx) | |
2473 | { | |
2474 | ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark; | |
2598c001 | 2475 | ddt_entry_t dde; |
428870ff BB |
2476 | int error; |
2477 | uint64_t n = 0; | |
2478 | ||
2598c001 BB |
2479 | bzero(&dde, sizeof (ddt_entry_t)); |
2480 | ||
428870ff BB |
2481 | while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) { |
2482 | ddt_t *ddt; | |
2483 | ||
2484 | if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max) | |
2485 | break; | |
2486 | dprintf("visiting ddb=%llu/%llu/%llu/%llx\n", | |
2487 | (longlong_t)ddb->ddb_class, | |
2488 | (longlong_t)ddb->ddb_type, | |
2489 | (longlong_t)ddb->ddb_checksum, | |
2490 | (longlong_t)ddb->ddb_cursor); | |
2491 | ||
2492 | /* There should be no pending changes to the dedup table */ | |
2493 | ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum]; | |
2494 | ASSERT(avl_first(&ddt->ddt_tree) == NULL); | |
2495 | ||
2496 | dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx); | |
2497 | n++; | |
2498 | ||
0ea05c64 | 2499 | if (dsl_scan_check_suspend(scn, NULL)) |
428870ff BB |
2500 | break; |
2501 | } | |
2502 | ||
0ea05c64 AP |
2503 | zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; " |
2504 | "suspending=%u", (longlong_t)n, | |
2505 | (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending); | |
428870ff BB |
2506 | |
2507 | ASSERT(error == 0 || error == ENOENT); | |
2508 | ASSERT(error != ENOENT || | |
2509 | ddb->ddb_class > scn->scn_phys.scn_ddt_class_max); | |
2510 | } | |
2511 | ||
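/*
 * Return the highest txg the scan should visit within a dataset; for
 * snapshots this is capped at the snapshot's creation txg.
 */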
d4a72f23 TC |
2512 | static uint64_t |
2513 | dsl_scan_ds_maxtxg(dsl_dataset_t *ds) | |
428870ff | 2514 | { |
d4a72f23 TC |
2515 | uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg; |
2516 | if (ds->ds_is_snapshot) | |
2517 | return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg)); | |
2518 | return (smt); | |
428870ff BB |
2519 | } |
2520 | ||
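/*
 * Top-level metadata traversal for one sync pass: scan the configured
 * DDT classes first, then the MOS and $ORIGIN, then any dataset we
 * suspended in, and finally drain the dataset work queue, suspending
 * whenever the time or memory checks demand it.
 */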
2521 | static void | |
2522 | dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) | |
2523 | { | |
d4a72f23 | 2524 | scan_ds_t *sds; |
428870ff | 2525 | dsl_pool_t *dp = scn->scn_dp; |
428870ff BB |
2526 | |
2527 | if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= | |
2528 | scn->scn_phys.scn_ddt_class_max) { | |
2529 | scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; | |
2530 | scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; | |
2531 | dsl_scan_ddt(scn, tx); | |
0ea05c64 | 2532 | if (scn->scn_suspending) |
428870ff BB |
2533 | return; |
2534 | } | |
2535 | ||
2536 | if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { | |
2537 | /* First do the MOS & ORIGIN */ | |
2538 | ||
2539 | scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; | |
2540 | scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; | |
2541 | dsl_scan_visit_rootbp(scn, NULL, | |
2542 | &dp->dp_meta_rootbp, tx); | |
2543 | spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp); | |
0ea05c64 | 2544 | if (scn->scn_suspending) |
428870ff BB |
2545 | return; |
2546 | ||
2547 | if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { | |
13fe0198 | 2548 | VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, |
d4a72f23 | 2549 | enqueue_cb, NULL, DS_FIND_CHILDREN)); |
428870ff BB |
2550 | } else { |
2551 | dsl_scan_visitds(scn, | |
2552 | dp->dp_origin_snap->ds_object, tx); | |
2553 | } | |
0ea05c64 | 2554 | ASSERT(!scn->scn_suspending); |
428870ff BB |
2555 | } else if (scn->scn_phys.scn_bookmark.zb_objset != |
2556 | ZB_DESTROYED_OBJSET) { | |
d4a72f23 | 2557 | uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; |
428870ff | 2558 | /* |
d4a72f23 | 2559 | * If we were suspended, continue from here. Note if the |
0ea05c64 | 2560 | * ds we were suspended on was deleted, the zb_objset may |
428870ff BB |
2561 | * be -1, so we will skip this and find a new objset |
2562 | * below. | |
2563 | */ | |
d4a72f23 | 2564 | dsl_scan_visitds(scn, dsobj, tx); |
0ea05c64 | 2565 | if (scn->scn_suspending) |
428870ff BB |
2566 | return; |
2567 | } | |
2568 | ||
2569 | /* | |
d4a72f23 | 2570 | * In case we suspended right at the end of the ds, zero the |
428870ff BB |
2571 | * bookmark so we don't think that we're still trying to resume. |
2572 | */ | |
5dbd68a3 | 2573 | bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t)); |
428870ff | 2574 | |
d4a72f23 TC |
2575 | /* |
2576 | * Keep pulling things out of the dataset avl queue. Updates to the | |
2577 | * persistent zap-object-as-queue happen only at checkpoints. | |
2578 | */ | |
2579 | while ((sds = avl_first(&scn->scn_queue)) != NULL) { | |
428870ff | 2580 | dsl_dataset_t *ds; |
d4a72f23 TC |
2581 | uint64_t dsobj = sds->sds_dsobj; |
2582 | uint64_t txg = sds->sds_txg; | |
428870ff | 2583 | |
d4a72f23 TC |
2584 | /* dequeue and free the ds from the queue */ |
2585 | scan_ds_queue_remove(scn, dsobj); | |
2586 | sds = NULL; | |
428870ff | 2587 | |
d4a72f23 | 2588 | /* set up min / max txg */ |
428870ff | 2589 | VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); |
d4a72f23 | 2590 | if (txg != 0) { |
428870ff | 2591 | scn->scn_phys.scn_cur_min_txg = |
d4a72f23 | 2592 | MAX(scn->scn_phys.scn_min_txg, txg); |
428870ff BB |
2593 | } else { |
2594 | scn->scn_phys.scn_cur_min_txg = | |
2595 | MAX(scn->scn_phys.scn_min_txg, | |
d683ddbb | 2596 | dsl_dataset_phys(ds)->ds_prev_snap_txg); |
428870ff BB |
2597 | } |
2598 | scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); | |
2599 | dsl_dataset_rele(ds, FTAG); | |
2600 | ||
2601 | dsl_scan_visitds(scn, dsobj, tx); | |
0ea05c64 | 2602 | if (scn->scn_suspending) |
d4a72f23 | 2603 | return; |
428870ff | 2604 | } |
d4a72f23 TC |
2605 | |
2606 | /* No more objsets to fetch, we're done */ | |
2607 | scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; | |
2608 | ASSERT0(scn->scn_suspending); | |
2609 | } | |
2610 | ||
2611 | static uint64_t | |
2612 | dsl_scan_count_leaves(vdev_t *vd) | |
2613 | { | |
2614 | uint64_t i, leaves = 0; | |
2615 | ||
2616 | /* we only count leaves that belong to the main pool and are readable */ | |
2617 | if (vd->vdev_islog || vd->vdev_isspare || | |
2618 | vd->vdev_isl2cache || !vdev_readable(vd)) | |
2619 | return (0); | |
2620 | ||
2621 | if (vd->vdev_ops->vdev_op_leaf) | |
2622 | return (1); | |
2623 | ||
2624 | for (i = 0; i < vd->vdev_children; i++) { | |
2625 | leaves += dsl_scan_count_leaves(vd->vdev_child[i]); | |
2626 | } | |
2627 | ||
2628 | return (leaves); | |
2629 | } | |
2630 | ||
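/* Accumulate per-txg zio statistics (allocated bytes and zio count). */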
2631 | static void | |
2632 | scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp) | |
2633 | { | |
2634 | int i; | |
2635 | uint64_t cur_size = 0; | |
2636 | ||
2637 | for (i = 0; i < BP_GET_NDVAS(bp); i++) { | |
2638 | cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]); | |
2639 | } | |
2640 | ||
2641 | q->q_total_zio_size_this_txg += cur_size; | |
2642 | q->q_zios_this_txg++; | |
2643 | } | |
2644 | ||
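/* Accumulate per-txg extent statistics (segment bytes and count). */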
2645 | static void | |
2646 | scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start, | |
2647 | uint64_t end) | |
2648 | { | |
2649 | q->q_total_seg_size_this_txg += end - start; | |
2650 | q->q_segs_this_txg++; | |
2651 | } | |
2652 | ||
2653 | static boolean_t | |
2654 | scan_io_queue_check_suspend(dsl_scan_t *scn) | |
2655 | { | |
2656 | /* See comment in dsl_scan_check_suspend() */ | |
2657 | uint64_t curr_time_ns = gethrtime(); | |
2658 | uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; | |
2659 | uint64_t sync_time_ns = curr_time_ns - | |
2660 | scn->scn_dp->dp_spa->spa_sync_starttime; | |
2661 | int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max; | |
2662 | int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? | |
2663 | zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; | |
2664 | ||
2665 | return ((NSEC2MSEC(scan_time_ns) > mintime && | |
2666 | (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent || | |
2667 | txg_sync_waiting(scn->scn_dp) || | |
2668 | NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || | |
2669 | spa_shutting_down(scn->scn_dp->dp_spa)); | |
2670 | } | |
2671 | ||
2672 | /* | |
13a2ff27 | 2673 | * Given a list of scan_io_t's in io_list, this issues the I/Os out to |
d4a72f23 TC |
2674 | * disk. This consumes the io_list and frees the scan_io_t's. This is |
2675 | * called when emptying queues, either when we're up against the memory | |
2676 | * limit or when we have finished scanning. Returns B_TRUE if we stopped | |
13a2ff27 | 2677 | * processing the list before we finished. Any sios that were not issued |
d4a72f23 TC |
2678 | * will remain in the io_list. |
2679 | */ | |
2680 | static boolean_t | |
2681 | scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list) | |
2682 | { | |
2683 | dsl_scan_t *scn = queue->q_scn; | |
2684 | scan_io_t *sio; | |
2685 | int64_t bytes_issued = 0; | |
2686 | boolean_t suspended = B_FALSE; | |
2687 | ||
2688 | while ((sio = list_head(io_list)) != NULL) { | |
2689 | blkptr_t bp; | |
2690 | ||
2691 | if (scan_io_queue_check_suspend(scn)) { | |
2692 | suspended = B_TRUE; | |
2693 | break; | |
2694 | } | |
2695 | ||
2696 | sio2bp(sio, &bp, queue->q_vd->vdev_id); | |
2697 | bytes_issued += sio->sio_asize; | |
2698 | scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, | |
2699 | &sio->sio_zb, queue); | |
2700 | (void) list_remove_head(io_list); | |
2701 | scan_io_queues_update_zio_stats(queue, &bp); | |
2702 | kmem_cache_free(sio_cache, sio); | |
2703 | } | |
2704 | ||
2705 | atomic_add_64(&scn->scn_bytes_pending, -bytes_issued); | |
2706 | ||
2707 | return (suspended); | |
2708 | } | |
2709 | ||
2710 | /* | |
2711 | * This function removes sios from an IO queue which reside within a given | |
2712 | * range_seg_t and inserts them (in offset order) into a list. Note that | |
2713 | * we only ever return a maximum of 32 sios at once. If there are more | 
2714 | * sios within this segment that did not make it onto the list, we | 
2715 | * return B_TRUE; otherwise we return B_FALSE. | 
2716 | */ | |
2717 | static boolean_t | |
2718 | scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) | |
2719 | { | |
2720 | scan_io_t srch_sio, *sio, *next_sio; | |
2721 | avl_index_t idx; | |
2722 | uint_t num_sios = 0; | |
2723 | int64_t bytes_issued = 0; | |
2724 | ||
2725 | ASSERT(rs != NULL); | |
2726 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
2727 | ||
2728 | srch_sio.sio_offset = rs->rs_start; | |
2729 | ||
2730 | /* | |
2731 | * The exact start of the extent might not contain any matching zios, | |
2732 | * so if that's the case, examine the next one in the tree. | |
2733 | */ | |
2734 | sio = avl_find(&queue->q_sios_by_addr, &srch_sio, &idx); | |
2735 | if (sio == NULL) | |
2736 | sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); | |
2737 | ||
2738 | while (sio != NULL && sio->sio_offset < rs->rs_end && num_sios < 32) { | |
2739 | ASSERT3U(sio->sio_offset, >=, rs->rs_start); | |
2740 | ASSERT3U(sio->sio_offset + sio->sio_asize, <=, rs->rs_end); | |
2741 | ||
2742 | next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); | |
2743 | avl_remove(&queue->q_sios_by_addr, sio); | |
2744 | ||
2745 | bytes_issued += sio->sio_asize; | |
2746 | num_sios++; | |
2747 | list_insert_tail(list, sio); | |
2748 | sio = next_sio; | |
2749 | } | |
2750 | ||
2751 | /* | |
2752 | * We limit the number of sios we process at once to 32 to avoid | |
2753 | * biting off more than we can chew. If we didn't take everything | |
2754 | * in the segment we update it to reflect the work we were able to | |
2755 | * complete. Otherwise, we remove it from the range tree entirely. | |
2756 | */ | |
2757 | if (sio != NULL && sio->sio_offset < rs->rs_end) { | |
2758 | range_tree_adjust_fill(queue->q_exts_by_addr, rs, | |
2759 | -bytes_issued); | |
2760 | range_tree_resize_segment(queue->q_exts_by_addr, rs, | |
2761 | sio->sio_offset, rs->rs_end - sio->sio_offset); | |
2762 | ||
2763 | return (B_TRUE); | |
2764 | } else { | |
2765 | range_tree_remove(queue->q_exts_by_addr, rs->rs_start, | |
2766 | rs->rs_end - rs->rs_start); | |
2767 | return (B_FALSE); | |
2768 | } | |
2769 | } | |
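/*
 * The intended calling pattern, condensed from scan_io_queues_run_one()
 * below (suspend handling, stats and cleanup omitted), is a simple
 * drain loop: for an extent rs previously returned by
 * scan_io_queue_fetch_ext(), keep gathering and issuing batches until
 * the gather reports that nothing is left.
 *
 *	list_t sios;
 *	boolean_t more = B_TRUE;
 *
 *	list_create(&sios, sizeof (scan_io_t),
 *	    offsetof(scan_io_t, sio_nodes.sio_list_node));
 *	while (more) {
 *		more = scan_io_queue_gather(queue, rs, &sios);
 *		(void) scan_io_queue_issue(queue, &sios);
 *	}
 *
 * On the partial path the extent is resized in place, so the same rs
 * pointer remains usable for the next pass, which is exactly what the
 * real caller relies on.
 */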
2770 | ||
2771 | /* | |
2772 | * This is called from the queue emptying thread and selects the next | |
13a2ff27 | 2773 | * extent from which we are to issue I/Os. The behavior of this function |
d4a72f23 TC |
2774 | * depends on the state of the scan, the current memory consumption and |
2775 | * whether or not we are performing a scan shutdown. | |
2776 | * 1) We select extents in an elevator algorithm (LBA-order) if the scan | |
2777 | * needs to perform a checkpoint | |
2778 | * 2) We select the largest available extent if we are up against the | |
2779 | * memory limit. | |
2780 | * 3) Otherwise we don't select any extents. | |
2781 | */ | |
2782 | static range_seg_t * | |
2783 | scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) | |
2784 | { | |
2785 | dsl_scan_t *scn = queue->q_scn; | |
2786 | ||
2787 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
2788 | ASSERT(scn->scn_is_sorted); | |
2789 | ||
2790 | /* handle tunable overrides */ | |
2791 | if (scn->scn_checkpointing || scn->scn_clearing) { | |
2792 | if (zfs_scan_issue_strategy == 1) { | |
2793 | return (range_tree_first(queue->q_exts_by_addr)); | |
2794 | } else if (zfs_scan_issue_strategy == 2) { | |
2795 | return (avl_first(&queue->q_exts_by_size)); | |
2796 | } | |
2797 | } | |
2798 | ||
2799 | /* | |
2800 | * During normal clearing, we want to issue our largest segments | |
2801 | * first, keeping IO as sequential as possible, and leaving the | |
2802 | * smaller extents for later with the hope that they might eventually | |
2803 | * grow to larger sequential segments. However, when the scan is | |
2804 | * checkpointing, no new extents will be added to the sorting queue, | |
2805 | * so the way we are sorted now is as good as it will ever get. | |
2806 | * In this case, we instead switch to issuing extents in LBA order. | |
2807 | */ | |
2808 | if (scn->scn_checkpointing) { | |
2809 | return (range_tree_first(queue->q_exts_by_addr)); | |
2810 | } else if (scn->scn_clearing) { | |
2811 | return (avl_first(&queue->q_exts_by_size)); | |
2812 | } else { | |
2813 | return (NULL); | |
2814 | } | |
2815 | } | |
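/*
 * A compact reading of the selection above (a summary sketch only;
 * note that entering a checkpoint sets both flags, and checkpointing
 * wins):
 *
 *	typedef enum {
 *		SCAN_ISSUE_NONE,	// !checkpointing && !clearing
 *		SCAN_ISSUE_BY_SCORE,	// clearing: best-scoring extent
 *					// first, via q_exts_by_size
 *		SCAN_ISSUE_BY_LBA	// checkpointing: lowest LBA first,
 *					// via q_exts_by_addr
 *	} scan_issue_order_t;
 *
 * with zfs_scan_issue_strategy forcing LBA order (1) or score order (2)
 * whenever the scan is checkpointing or clearing at all.
 */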
2816 | ||
2817 | static void | |
2818 | scan_io_queues_run_one(void *arg) | |
2819 | { | |
2820 | dsl_scan_io_queue_t *queue = arg; | |
2821 | kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; | |
2822 | boolean_t suspended = B_FALSE; | |
2823 | range_seg_t *rs = NULL; | |
2824 | scan_io_t *sio = NULL; | |
2825 | list_t sio_list; | |
2826 | uint64_t bytes_per_leaf = zfs_scan_vdev_limit; | |
2827 | uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd); | |
2828 | ||
2829 | ASSERT(queue->q_scn->scn_is_sorted); | |
2830 | ||
2831 | list_create(&sio_list, sizeof (scan_io_t), | |
2832 | offsetof(scan_io_t, sio_nodes.sio_list_node)); | |
2833 | mutex_enter(q_lock); | |
2834 | ||
2835 | /* calculate maximum in-flight bytes for this txg (min 1MB) */ | |
2836 | queue->q_maxinflight_bytes = | |
2837 | MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); | |
2838 | ||
2839 | /* reset per-queue scan statistics for this txg */ | |
2840 | queue->q_total_seg_size_this_txg = 0; | |
2841 | queue->q_segs_this_txg = 0; | |
2842 | queue->q_total_zio_size_this_txg = 0; | |
2843 | queue->q_zios_this_txg = 0; | |
2844 | ||
2845 | /* loop until we run out of time or sios */ | |
2846 | while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) { | |
2847 | uint64_t seg_start = 0, seg_end = 0; | |
2848 | boolean_t more_left = B_TRUE; | |
2849 | ||
2850 | ASSERT(list_is_empty(&sio_list)); | |
2851 | ||
2852 | /* loop while we still have sios left to process in this rs */ | |
2853 | while (more_left) { | |
2854 | scan_io_t *first_sio, *last_sio; | |
2855 | ||
2856 | /* | |
2857 | * We have selected which extent needs to be | |
2858 | * processed next. Gather up the corresponding sios. | |
2859 | */ | |
2860 | more_left = scan_io_queue_gather(queue, rs, &sio_list); | |
2861 | ASSERT(!list_is_empty(&sio_list)); | |
2862 | first_sio = list_head(&sio_list); | |
2863 | last_sio = list_tail(&sio_list); | |
2864 | ||
2865 | seg_end = last_sio->sio_offset + last_sio->sio_asize; | |
2866 | if (seg_start == 0) | |
2867 | seg_start = first_sio->sio_offset; | |
2868 | ||
2869 | /* | |
2870 | * Issuing sios can take a long time so drop the | |
2871 | * queue lock. The sio queue won't be updated by | |
2872 | * other threads since we're in syncing context so | |
2873 | * we can be sure that our trees will remain exactly | |
2874 | * as we left them. | |
2875 | */ | |
2876 | mutex_exit(q_lock); | |
2877 | suspended = scan_io_queue_issue(queue, &sio_list); | |
2878 | mutex_enter(q_lock); | |
2879 | ||
2880 | if (suspended) | |
2881 | break; | |
2882 | } | |
2883 | ||
2884 | /* update statistics for debugging purposes */ | |
2885 | scan_io_queues_update_seg_stats(queue, seg_start, seg_end); | |
2886 | ||
2887 | if (suspended) | |
2888 | break; | |
2889 | } | |
2890 | ||
2891 | /* | |
2892 | * If we were suspended in the middle of processing, | |
2893 | * requeue any unfinished sios and exit. | |
2894 | */ | |
2895 | while ((sio = list_head(&sio_list)) != NULL) { | |
2896 | list_remove(&sio_list, sio); | |
2897 | scan_io_queue_insert_impl(queue, sio); | |
2898 | } | |
2899 | ||
2900 | mutex_exit(q_lock); | |
2901 | list_destroy(&sio_list); | |
2902 | } | |
2903 | ||
2904 | /* | |
2905 | * Performs an emptying run on all scan queues in the pool. This just | |
2906 | * punches out one thread per top-level vdev, each of which processes | |
2907 | * only that vdev's scan queue. We can parallelize the I/O here because | |
13a2ff27 | 2908 | * we know that each queue's I/Os only affect its own top-level vdev. |
d4a72f23 TC |
2909 | * |
2910 | * This function waits for the queue runs to complete, and must be | |
2911 | * called from dsl_scan_sync (or in general, syncing context). | |
2912 | */ | |
2913 | static void | |
2914 | scan_io_queues_run(dsl_scan_t *scn) | |
2915 | { | |
2916 | spa_t *spa = scn->scn_dp->dp_spa; | |
2917 | ||
2918 | ASSERT(scn->scn_is_sorted); | |
2919 | ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); | |
2920 | ||
2921 | if (scn->scn_bytes_pending == 0) | |
2922 | return; | |
2923 | ||
2924 | if (scn->scn_taskq == NULL) { | |
2925 | int nthreads = spa->spa_root_vdev->vdev_children; | |
2926 | ||
2927 | /* | |
2928 | * We need to make this taskq *always* execute as many | |
2929 | * threads in parallel as we have top-level vdevs and no | |
2930 | * less, otherwise strange serialization of the calls to | |
2931 | * scan_io_queues_run_one can occur during spa_sync runs | |
2932 | * and that significantly impacts performance. | |
2933 | */ | |
2934 | scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads, | |
2935 | minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE); | |
2936 | } | |
2937 | ||
2938 | for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { | |
2939 | vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; | |
2940 | ||
2941 | mutex_enter(&vd->vdev_scan_io_queue_lock); | |
2942 | if (vd->vdev_scan_io_queue != NULL) { | |
2943 | VERIFY(taskq_dispatch(scn->scn_taskq, | |
2944 | scan_io_queues_run_one, vd->vdev_scan_io_queue, | |
2945 | TQ_SLEEP) != TASKQID_INVALID); | |
2946 | } | |
2947 | mutex_exit(&vd->vdev_scan_io_queue_lock); | |
2948 | } | |
2949 | ||
2950 | /* | |
13a2ff27 | 2951 | * Wait for the queues to finish issuing their IOs for this run |
d4a72f23 TC |
2952 | * before we return. There may still be IOs in flight at this |
2953 | * point. | |
2954 | */ | |
2955 | taskq_wait(scn->scn_taskq); | |
428870ff BB |
2956 | } |
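/*
 * The sizing above matters: with fewer threads than top-level vdevs,
 * two queues would share a thread and their issue phases would run
 * serially instead of in parallel. A condensed sketch of the
 * fan-out/join pattern used here, where `n' is the number of top-level
 * vdevs and `queues' is a hypothetical array standing in for the
 * per-vdev lookups:
 *
 *	taskq_t *tq = taskq_create("dsl_scan_iss", n, minclsyspri,
 *	    n, n, TASKQ_PREPOPULATE);
 *	for (uint64_t i = 0; i < n; i++)
 *		VERIFY(taskq_dispatch(tq, scan_io_queues_run_one,
 *		    queues[i], TQ_SLEEP) != TASKQID_INVALID);
 *	taskq_wait(tq);		// join before syncing context proceeds
 */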
2957 | ||
9ae529ec | 2958 | static boolean_t |
a1d477c2 | 2959 | dsl_scan_async_block_should_pause(dsl_scan_t *scn) |
428870ff | 2960 | { |
428870ff BB |
2961 | uint64_t elapsed_nanosecs; |
2962 | ||
78e2739d MA |
2963 | if (zfs_recover) |
2964 | return (B_FALSE); | |
2965 | ||
a1d477c2 | 2966 | if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks) |
36283ca2 MG |
2967 | return (B_TRUE); |
2968 | ||
428870ff | 2969 | elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; |
9ae529ec | 2970 | return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout || |
a1d477c2 | 2971 | (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms && |
428870ff | 2972 | txg_sync_waiting(scn->scn_dp)) || |
9ae529ec CS |
2973 | spa_shutting_down(scn->scn_dp->dp_spa)); |
2974 | } | |
2975 | ||
2976 | static int | |
2977 | dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) | |
2978 | { | |
2979 | dsl_scan_t *scn = arg; | |
2980 | ||
2981 | if (!scn->scn_is_bptree || | |
2982 | (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) { | |
a1d477c2 | 2983 | if (dsl_scan_async_block_should_pause(scn)) |
2e528b49 | 2984 | return (SET_ERROR(ERESTART)); |
9ae529ec | 2985 | } |
428870ff BB |
2986 | |
2987 | zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa, | |
2988 | dmu_tx_get_txg(tx), bp, 0)); | |
2989 | dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, | |
2990 | -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp), | |
2991 | -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); | |
2992 | scn->scn_visited_this_txg++; | |
2993 | return (0); | |
2994 | } | |
2995 | ||
d4a72f23 TC |
2996 | static void |
2997 | dsl_scan_update_stats(dsl_scan_t *scn) | |
2998 | { | |
2999 | spa_t *spa = scn->scn_dp->dp_spa; | |
3000 | uint64_t i; | |
3001 | uint64_t seg_size_total = 0, zio_size_total = 0; | |
3002 | uint64_t seg_count_total = 0, zio_count_total = 0; | |
3003 | ||
3004 | for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { | |
3005 | vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; | |
3006 | dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue; | |
3007 | ||
3008 | if (queue == NULL) | |
3009 | continue; | |
3010 | ||
3011 | seg_size_total += queue->q_total_seg_size_this_txg; | |
3012 | zio_size_total += queue->q_total_zio_size_this_txg; | |
3013 | seg_count_total += queue->q_segs_this_txg; | |
3014 | zio_count_total += queue->q_zios_this_txg; | |
3015 | } | |
3016 | ||
3017 | if (seg_count_total == 0 || zio_count_total == 0) { | |
3018 | scn->scn_avg_seg_size_this_txg = 0; | |
3019 | scn->scn_avg_zio_size_this_txg = 0; | |
3020 | scn->scn_segs_this_txg = 0; | |
3021 | scn->scn_zios_this_txg = 0; | |
3022 | return; | |
3023 | } | |
3024 | ||
3025 | scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; | |
3026 | scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; | |
3027 | scn->scn_segs_this_txg = seg_count_total; | |
3028 | scn->scn_zios_this_txg = zio_count_total; | |
3029 | } | |
3030 | ||
a1d477c2 MA |
3031 | static int |
3032 | dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) | |
3033 | { | |
3034 | dsl_scan_t *scn = arg; | |
3035 | const dva_t *dva = &bp->blk_dva[0]; | |
3036 | ||
3037 | if (dsl_scan_async_block_should_pause(scn)) | |
3038 | return (SET_ERROR(ERESTART)); | |
3039 | ||
3040 | spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, | |
3041 | DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), | |
3042 | DVA_GET_ASIZE(dva), tx); | |
3043 | scn->scn_visited_this_txg++; | |
3044 | return (0); | |
3045 | } | |
3046 | ||
428870ff BB |
3047 | boolean_t |
3048 | dsl_scan_active(dsl_scan_t *scn) | |
3049 | { | |
3050 | spa_t *spa = scn->scn_dp->dp_spa; | |
3051 | uint64_t used = 0, comp, uncomp; | |
3052 | ||
3053 | if (spa->spa_load_state != SPA_LOAD_NONE) | |
3054 | return (B_FALSE); | |
3055 | if (spa_shutting_down(spa)) | |
3056 | return (B_FALSE); | |
d4a72f23 | 3057 | if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || |
fbeddd60 | 3058 | (scn->scn_async_destroying && !scn->scn_async_stalled)) |
428870ff BB |
3059 | return (B_TRUE); |
3060 | ||
3061 | if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { | |
3062 | (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, | |
3063 | &used, &comp, &uncomp); | |
3064 | } | |
3065 | return (used != 0); | |
3066 | } | |
3067 | ||
80a91e74 TC |
3068 | static boolean_t |
3069 | dsl_scan_check_deferred(vdev_t *vd) | |
3070 | { | |
3071 | boolean_t need_resilver = B_FALSE; | |
3072 | ||
3073 | for (int c = 0; c < vd->vdev_children; c++) { | |
3074 | need_resilver |= | |
3075 | dsl_scan_check_deferred(vd->vdev_child[c]); | |
3076 | } | |
3077 | ||
3078 | if (!vdev_is_concrete(vd) || vd->vdev_aux || | |
3079 | !vd->vdev_ops->vdev_op_leaf) | |
3080 | return (need_resilver); | |
3081 | ||
3082 | if (!vd->vdev_resilver_deferred) | |
3083 | need_resilver = B_TRUE; | |
3084 | ||
3085 | return (need_resilver); | |
3086 | } | |
3087 | ||
d4a72f23 TC |
3088 | static boolean_t |
3089 | dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, | |
3090 | uint64_t phys_birth) | |
3091 | { | |
3092 | vdev_t *vd; | |
3093 | ||
9e052db4 MA |
3094 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); |
3095 | ||
3096 | if (vd->vdev_ops == &vdev_indirect_ops) { | |
3097 | /* | |
3098 | * The indirect vdev can point to multiple | |
3099 | * vdevs. For simplicity, always create | |
3100 | * the resilver zio_t. zio_vdev_io_start() | |
3101 | * will bypass the child resilver i/o's if | |
3102 | * they are on vdevs that don't have DTL's. | |
3103 | */ | |
3104 | return (B_TRUE); | |
3105 | } | |
3106 | ||
d4a72f23 TC |
3107 | if (DVA_GET_GANG(dva)) { |
3108 | /* | |
3109 | * Gang members may be spread across multiple | |
3110 | * vdevs, so the best estimate we have is the | |
3111 | * scrub range, which has already been checked. | |
3112 | * XXX -- it would be better to change our | |
3113 | * allocation policy to ensure that all | |
3114 | * gang members reside on the same vdev. | |
3115 | */ | |
3116 | return (B_TRUE); | |
3117 | } | |
3118 | ||
d4a72f23 TC |
3119 | /* |
3120 | * Check if the txg falls within the range which must be | |
3121 | * resilvered. DVAs outside this range can always be skipped. | |
3122 | */ | |
3123 | if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1)) | |
3124 | return (B_FALSE); | |
3125 | ||
3126 | /* | |
3127 | * Check if the top-level vdev must resilver this offset. | |
3128 | * When the offset does not intersect with a dirty leaf DTL | |
3129 | * then it may be possible to skip the resilver IO. The psize | |
3130 | * is provided instead of asize to simplify the check for RAIDZ. | |
3131 | */ | |
3132 | if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize)) | |
3133 | return (B_FALSE); | |
3134 | ||
80a91e74 TC |
3135 | /* |
3136 | * Check that this top-level vdev has a device under it which | |
3137 | * is resilvering and is not deferred. | |
3138 | */ | |
3139 | if (!dsl_scan_check_deferred(vd)) | |
3140 | return (B_FALSE); | |
3141 | ||
d4a72f23 TC |
3142 | return (B_TRUE); |
3143 | } | |
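/*
 * For reading convenience, the cascade above condenses to a single
 * predicate (an equivalent restatement of the checks, not a
 * replacement for them):
 *
 *	static boolean_t
 *	need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
 *	    uint64_t phys_birth)
 *	{
 *		// indirect vdevs and gang blocks short-circuit to
 *		// B_TRUE and let lower layers prune the child I/Os
 *		if (vd->vdev_ops == &vdev_indirect_ops ||
 *		    DVA_GET_GANG(dva))
 *			return (B_TRUE);
 *		return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1) &&
 *		    vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize) &&
 *		    dsl_scan_check_deferred(vd));
 *	}
 */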
3144 | ||
d2734cce SD |
3145 | static int |
3146 | dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) | |
428870ff BB |
3147 | { |
3148 | dsl_scan_t *scn = dp->dp_scan; | |
3149 | spa_t *spa = dp->dp_spa; | |
d2734cce | 3150 | int err = 0; |
428870ff | 3151 | |
d2734cce SD |
3152 | if (spa_suspend_async_destroy(spa)) |
3153 | return (0); | |
428870ff | 3154 | |
ba5ad9a4 | 3155 | if (zfs_free_bpobj_enabled && |
d4a72f23 | 3156 | spa_version(spa) >= SPA_VERSION_DEADLISTS) { |
9ae529ec | 3157 | scn->scn_is_bptree = B_FALSE; |
a1d477c2 | 3158 | scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; |
d4a72f23 | 3159 | scn->scn_zio_root = zio_root(spa, NULL, |
428870ff BB |
3160 | NULL, ZIO_FLAG_MUSTSUCCEED); |
3161 | err = bpobj_iterate(&dp->dp_free_bpobj, | |
9ae529ec | 3162 | dsl_scan_free_block_cb, scn, tx); |
d4a72f23 TC |
3163 | VERIFY0(zio_wait(scn->scn_zio_root)); |
3164 | scn->scn_zio_root = NULL; | |
9ae529ec | 3165 | |
fbeddd60 MA |
3166 | if (err != 0 && err != ERESTART) |
3167 | zfs_panic_recover("error %u from bpobj_iterate()", err); | |
3168 | } | |
13fe0198 | 3169 | |
fbeddd60 MA |
3170 | if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { |
3171 | ASSERT(scn->scn_async_destroying); | |
3172 | scn->scn_is_bptree = B_TRUE; | |
d4a72f23 | 3173 | scn->scn_zio_root = zio_root(spa, NULL, |
fbeddd60 MA |
3174 | NULL, ZIO_FLAG_MUSTSUCCEED); |
3175 | err = bptree_iterate(dp->dp_meta_objset, | |
3176 | dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); | |
3177 | VERIFY0(zio_wait(scn->scn_zio_root)); | |
d4a72f23 | 3178 | scn->scn_zio_root = NULL; |
fbeddd60 MA |
3179 | |
3180 | if (err == EIO || err == ECKSUM) { | |
3181 | err = 0; | |
3182 | } else if (err != 0 && err != ERESTART) { | |
3183 | zfs_panic_recover("error %u from " | |
3184 | "traverse_dataset_destroyed()", err); | |
9ae529ec | 3185 | } |
fbeddd60 | 3186 | |
fbeddd60 MA |
3187 | if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { |
3188 | /* finished; deactivate async destroy feature */ | |
3189 | spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); | |
3190 | ASSERT(!spa_feature_is_active(spa, | |
3191 | SPA_FEATURE_ASYNC_DESTROY)); | |
3192 | VERIFY0(zap_remove(dp->dp_meta_objset, | |
3193 | DMU_POOL_DIRECTORY_OBJECT, | |
3194 | DMU_POOL_BPTREE_OBJ, tx)); | |
3195 | VERIFY0(bptree_free(dp->dp_meta_objset, | |
3196 | dp->dp_bptree_obj, tx)); | |
3197 | dp->dp_bptree_obj = 0; | |
3198 | scn->scn_async_destroying = B_FALSE; | |
905edb40 | 3199 | scn->scn_async_stalled = B_FALSE; |
89b1cd65 | 3200 | } else { |
3201 | /* | |
905edb40 MA |
3202 | * If we didn't make progress, mark the async |
3203 | * destroy as stalled, so that we will not initiate | |
3204 | * a spa_sync() on its behalf. Note that we only | |
3205 | * check this if we are not finished, because if the | |
3206 | * bptree had no blocks for us to visit, we can | |
3207 | * finish without "making progress". | |
89b1cd65 | 3208 | */ |
3209 | scn->scn_async_stalled = | |
3210 | (scn->scn_visited_this_txg == 0); | |
428870ff | 3211 | } |
fbeddd60 MA |
3212 | } |
3213 | if (scn->scn_visited_this_txg) { | |
3214 | zfs_dbgmsg("freed %llu blocks in %llums from " | |
3215 | "free_bpobj/bptree txg %llu; err=%u", | |
3216 | (longlong_t)scn->scn_visited_this_txg, | |
3217 | (longlong_t) | |
3218 | NSEC2MSEC(gethrtime() - scn->scn_sync_start_time), | |
3219 | (longlong_t)tx->tx_txg, err); | |
3220 | scn->scn_visited_this_txg = 0; | |
3221 | ||
3222 | /* | |
3223 | * Write out changes to the DDT that may be required as a | |
3224 | * result of the blocks freed. This ensures that the DDT | |
3225 | * is clean when a scrub/resilver runs. | |
3226 | */ | |
3227 | ddt_sync(spa, tx->tx_txg); | |
3228 | } | |
3229 | if (err != 0) | |
d2734cce | 3230 | return (err); |
7c9abfa7 GM |
3231 | if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && |
3232 | zfs_free_leak_on_eio && | |
d683ddbb JG |
3233 | (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 || |
3234 | dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 || | |
3235 | dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) { | |
fbeddd60 MA |
3236 | /* |
3237 | * We have finished background destroying, but there is still | |
3238 | * some space left in the dp_free_dir. Transfer this leaked | |
3239 | * space to the dp_leak_dir. | |
3240 | */ | |
3241 | if (dp->dp_leak_dir == NULL) { | |
3242 | rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); | |
3243 | (void) dsl_dir_create_sync(dp, dp->dp_root_dir, | |
3244 | LEAK_DIR_NAME, tx); | |
3245 | VERIFY0(dsl_pool_open_special_dir(dp, | |
3246 | LEAK_DIR_NAME, &dp->dp_leak_dir)); | |
3247 | rrw_exit(&dp->dp_config_rwlock, FTAG); | |
3248 | } | |
3249 | dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD, | |
d683ddbb JG |
3250 | dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, |
3251 | dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, | |
3252 | dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); | |
fbeddd60 | 3253 | dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD, |
d683ddbb JG |
3254 | -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, |
3255 | -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, | |
3256 | -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); | |
fbeddd60 | 3257 | } |
a1d477c2 | 3258 | |
7c9abfa7 | 3259 | if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) { |
9b67f605 | 3260 | /* finished; verify that space accounting went to zero */ |
d683ddbb JG |
3261 | ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes); |
3262 | ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes); | |
3263 | ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes); | |
428870ff BB |
3264 | } |
3265 | ||
a1d477c2 MA |
3266 | EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj), |
3267 | 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, | |
3268 | DMU_POOL_OBSOLETE_BPOBJ)); | |
3269 | if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) { | |
3270 | ASSERT(spa_feature_is_active(dp->dp_spa, | |
3271 | SPA_FEATURE_OBSOLETE_COUNTS)); | |
3272 | ||
3273 | scn->scn_is_bptree = B_FALSE; | |
3274 | scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms; | |
3275 | err = bpobj_iterate(&dp->dp_obsolete_bpobj, | |
3276 | dsl_scan_obsolete_block_cb, scn, tx); | |
3277 | if (err != 0 && err != ERESTART) | |
3278 | zfs_panic_recover("error %u from bpobj_iterate()", err); | |
3279 | ||
3280 | if (bpobj_is_empty(&dp->dp_obsolete_bpobj)) | |
3281 | dsl_pool_destroy_obsolete_bpobj(dp, tx); | |
3282 | } | |
d2734cce SD |
3283 | return (0); |
3284 | } | |
3285 | ||
3286 | /* | |
3287 | * This is the primary entry point for scans; it is called from syncing | |
3288 | * context. Scans must happen entirely during syncing context so that we | |
3289 | * can guarantee that blocks we are currently scanning will not change out | |
3290 | * from under us. While a scan is active, this function controls how quickly | |
3291 | * transaction groups proceed, instead of the normal handling provided by | |
3292 | * txg_sync_thread(). | |
3293 | */ | |
3294 | void | |
3295 | dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) | |
3296 | { | |
3297 | int err = 0; | |
3298 | dsl_scan_t *scn = dp->dp_scan; | |
3299 | spa_t *spa = dp->dp_spa; | |
3300 | state_sync_type_t sync_type = SYNC_OPTIONAL; | |
3301 | ||
80a91e74 TC |
3302 | if (spa->spa_resilver_deferred && |
3303 | !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) | |
3304 | spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx); | |
3305 | ||
d2734cce SD |
3306 | /* |
3307 | * Check for scn_restart_txg before checking spa_load_state, so | |
3308 | * that we can restart an old-style scan while the pool is being | |
80a91e74 TC |
3309 | * imported (see dsl_scan_init). We also restart scans if there |
3310 | * is a deferred resilver and the user has manually disabled | |
3311 | * deferred resilvers via the tunable. | |
d2734cce | 3312 | */ |
80a91e74 TC |
3313 | if (dsl_scan_restarting(scn, tx) || |
3314 | (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) { | |
d2734cce SD |
3315 | pool_scan_func_t func = POOL_SCAN_SCRUB; |
3316 | dsl_scan_done(scn, B_FALSE, tx); | |
3317 | if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) | |
3318 | func = POOL_SCAN_RESILVER; | |
3319 | zfs_dbgmsg("restarting scan func=%u txg=%llu", | |
3320 | func, (longlong_t)tx->tx_txg); | |
3321 | dsl_scan_setup_sync(&func, tx); | |
3322 | } | |
3323 | ||
3324 | /* | |
3325 | * Only process scans in sync pass 1. | |
3326 | */ | |
3327 | if (spa_sync_pass(spa) > 1) | |
3328 | return; | |
3329 | ||
3330 | /* | |
3331 | * If the spa is shutting down, then stop scanning. This will | |
3332 | * ensure that the scan does not dirty any new data during the | |
3333 | * shutdown phase. | |
3334 | */ | |
3335 | if (spa_shutting_down(spa)) | |
3336 | return; | |
3337 | ||
3338 | /* | |
3339 | * If the scan is inactive due to a stalled async destroy, try again. | |
3340 | */ | |
3341 | if (!scn->scn_async_stalled && !dsl_scan_active(scn)) | |
3342 | return; | |
3343 | ||
3344 | /* reset scan statistics */ | |
3345 | scn->scn_visited_this_txg = 0; | |
3346 | scn->scn_holes_this_txg = 0; | |
3347 | scn->scn_lt_min_this_txg = 0; | |
3348 | scn->scn_gt_max_this_txg = 0; | |
3349 | scn->scn_ddt_contained_this_txg = 0; | |
3350 | scn->scn_objsets_visited_this_txg = 0; | |
3351 | scn->scn_avg_seg_size_this_txg = 0; | |
3352 | scn->scn_segs_this_txg = 0; | |
3353 | scn->scn_avg_zio_size_this_txg = 0; | |
3354 | scn->scn_zios_this_txg = 0; | |
3355 | scn->scn_suspending = B_FALSE; | |
3356 | scn->scn_sync_start_time = gethrtime(); | |
3357 | spa->spa_scrub_active = B_TRUE; | |
3358 | ||
3359 | /* | |
3360 | * First process the async destroys. If we suspend, don't do | |
3361 | * any scrubbing or resilvering. This ensures that there are no | |
3362 | * async destroys while we are scanning, so the scan code doesn't | |
3363 | * have to worry about traversing it. It is also faster to free the | |
3364 | * blocks than to scrub them. | |
3365 | */ | |
3366 | err = dsl_process_async_destroys(dp, tx); | |
3367 | if (err != 0) | |
3368 | return; | |
a1d477c2 | 3369 | |
d4a72f23 | 3370 | if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn)) |
428870ff BB |
3371 | return; |
3372 | ||
d4a72f23 TC |
3373 | /* |
3374 | * Wait a few txgs after importing to begin scanning so that | |
3375 | * we can get the pool imported quickly. | |
3376 | */ | |
3377 | if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS) | |
5d1f7fb6 | 3378 | return; |
5d1f7fb6 | 3379 | |
cef48f14 TC |
3380 | /* |
3381 | * zfs_scan_suspend_progress can be set to disable scan progress. | |
3382 | * We don't want to spin the txg_sync thread, so we add a delay | |
3383 | * here to simulate the time spent doing a scan. This is mostly | |
3384 | * useful for testing and debugging. | |
3385 | */ | |
3386 | if (zfs_scan_suspend_progress) { | |
3387 | uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; | |
3388 | int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? | |
3389 | zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; | |
3390 | ||
3391 | while (zfs_scan_suspend_progress && | |
3392 | !txg_sync_waiting(scn->scn_dp) && | |
3393 | !spa_shutting_down(scn->scn_dp->dp_spa) && | |
3394 | NSEC2MSEC(scan_time_ns) < mintime) { | |
3395 | delay(hz); | |
3396 | scan_time_ns = gethrtime() - scn->scn_sync_start_time; | |
3397 | } | |
3398 | return; | |
3399 | } | |
3400 | ||
d4a72f23 TC |
3401 | /* |
3402 | * It is possible to switch from unsorted to sorted at any time, | |
3403 | * but afterwards the scan will remain sorted unless reloaded from | |
3404 | * a checkpoint after a reboot. | |
3405 | */ | |
3406 | if (!zfs_scan_legacy) { | |
3407 | scn->scn_is_sorted = B_TRUE; | |
3408 | if (scn->scn_last_checkpoint == 0) | |
3409 | scn->scn_last_checkpoint = ddi_get_lbolt(); | |
3410 | } | |
0ea05c64 | 3411 | |
d4a72f23 TC |
3412 | /* |
3413 | * For sorted scans, determine what kind of work we will be doing | |
3414 | * this txg based on our memory limitations and whether or not we | |
3415 | * need to perform a checkpoint. | |
3416 | */ | |
3417 | if (scn->scn_is_sorted) { | |
3418 | /* | |
3419 | * If we are over our checkpoint interval, set scn_clearing | |
3420 | * so that we can begin checkpointing immediately. The | |
13a2ff27 | 3421 | * checkpoint allows us to save a consistent bookmark |
d4a72f23 TC |
3422 | * representing how much data we have scrubbed so far. |
3423 | * Otherwise, use the memory limit to determine if we should | |
3424 | * scan for metadata or start issuing scrub IOs. We accumulate | |
3425 | * metadata until we hit our hard memory limit at which point | |
3426 | * we issue scrub IOs until we are at our soft memory limit. | |
3427 | */ | |
3428 | if (scn->scn_checkpointing || | |
3429 | ddi_get_lbolt() - scn->scn_last_checkpoint > | |
3430 | SEC_TO_TICK(zfs_scan_checkpoint_intval)) { | |
3431 | if (!scn->scn_checkpointing) | |
3432 | zfs_dbgmsg("begin scan checkpoint"); | |
3433 | ||
3434 | scn->scn_checkpointing = B_TRUE; | |
3435 | scn->scn_clearing = B_TRUE; | |
3436 | } else { | |
3437 | boolean_t should_clear = dsl_scan_should_clear(scn); | |
3438 | if (should_clear && !scn->scn_clearing) { | |
3439 | zfs_dbgmsg("begin scan clearing"); | |
3440 | scn->scn_clearing = B_TRUE; | |
3441 | } else if (!should_clear && scn->scn_clearing) { | |
3442 | zfs_dbgmsg("finish scan clearing"); | |
3443 | scn->scn_clearing = B_FALSE; | |
3444 | } | |
3445 | } | |
428870ff | 3446 | } else { |
d4a72f23 TC |
3447 | ASSERT0(scn->scn_checkpointing); |
3448 | ASSERT0(scn->scn_clearing); | |
428870ff BB |
3449 | } |
3450 | ||
d4a72f23 TC |
3451 | if (!scn->scn_clearing && scn->scn_done_txg == 0) { |
3452 | /* Need to scan metadata for more blocks to scrub */ | |
3453 | dsl_scan_phys_t *scnp = &scn->scn_phys; | |
3454 | taskqid_t prefetch_tqid; | |
3455 | uint64_t bytes_per_leaf = zfs_scan_vdev_limit; | |
3456 | uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev); | |
428870ff | 3457 | |
d4a72f23 | 3458 | /* |
f90a30ad | 3459 | * Recalculate the max number of in-flight bytes for pool-wide |
d4a72f23 TC |
3460 | * scanning operations (minimum 1MB). Limits for the issuing |
3461 | * phase are done per top-level vdev and are handled separately. | |
3462 | */ | |
3463 | scn->scn_maxinflight_bytes = | |
3464 | MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); | |
3465 | ||
3466 | if (scnp->scn_ddt_bookmark.ddb_class <= | |
3467 | scnp->scn_ddt_class_max) { | |
3468 | ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); | |
3469 | zfs_dbgmsg("doing scan sync txg %llu; " | |
3470 | "ddt bm=%llu/%llu/%llu/%llx", | |
3471 | (longlong_t)tx->tx_txg, | |
3472 | (longlong_t)scnp->scn_ddt_bookmark.ddb_class, | |
3473 | (longlong_t)scnp->scn_ddt_bookmark.ddb_type, | |
3474 | (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, | |
3475 | (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); | |
3476 | } else { | |
3477 | zfs_dbgmsg("doing scan sync txg %llu; " | |
3478 | "bm=%llu/%llu/%llu/%llu", | |
3479 | (longlong_t)tx->tx_txg, | |
3480 | (longlong_t)scnp->scn_bookmark.zb_objset, | |
3481 | (longlong_t)scnp->scn_bookmark.zb_object, | |
3482 | (longlong_t)scnp->scn_bookmark.zb_level, | |
3483 | (longlong_t)scnp->scn_bookmark.zb_blkid); | |
3484 | } | |
428870ff | 3485 | |
d4a72f23 TC |
3486 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, |
3487 | NULL, ZIO_FLAG_CANFAIL); | |
428870ff | 3488 | |
d4a72f23 TC |
3489 | scn->scn_prefetch_stop = B_FALSE; |
3490 | prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, | |
3491 | dsl_scan_prefetch_thread, scn, TQ_SLEEP); | |
3492 | ASSERT(prefetch_tqid != TASKQID_INVALID); | |
428870ff | 3493 | |
d4a72f23 TC |
3494 | dsl_pool_config_enter(dp, FTAG); |
3495 | dsl_scan_visit(scn, tx); | |
3496 | dsl_pool_config_exit(dp, FTAG); | |
428870ff | 3497 | |
d4a72f23 TC |
3498 | mutex_enter(&dp->dp_spa->spa_scrub_lock); |
3499 | scn->scn_prefetch_stop = B_TRUE; | |
3500 | cv_broadcast(&spa->spa_scrub_io_cv); | |
3501 | mutex_exit(&dp->dp_spa->spa_scrub_lock); | |
428870ff | 3502 | |
d4a72f23 TC |
3503 | taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); |
3504 | (void) zio_wait(scn->scn_zio_root); | |
3505 | scn->scn_zio_root = NULL; | |
3506 | ||
3507 | zfs_dbgmsg("scan visited %llu blocks in %llums " | |
3508 | "(%llu os's, %llu holes, %llu < mintxg, " | |
3509 | "%llu in ddt, %llu > maxtxg)", | |
3510 | (longlong_t)scn->scn_visited_this_txg, | |
3511 | (longlong_t)NSEC2MSEC(gethrtime() - | |
3512 | scn->scn_sync_start_time), | |
3513 | (longlong_t)scn->scn_objsets_visited_this_txg, | |
3514 | (longlong_t)scn->scn_holes_this_txg, | |
3515 | (longlong_t)scn->scn_lt_min_this_txg, | |
3516 | (longlong_t)scn->scn_ddt_contained_this_txg, | |
3517 | (longlong_t)scn->scn_gt_max_this_txg); | |
3518 | ||
3519 | if (!scn->scn_suspending) { | |
3520 | ASSERT0(avl_numnodes(&scn->scn_queue)); | |
3521 | scn->scn_done_txg = tx->tx_txg + 1; | |
3522 | if (scn->scn_is_sorted) { | |
3523 | scn->scn_checkpointing = B_TRUE; | |
3524 | scn->scn_clearing = B_TRUE; | |
3525 | } | |
3526 | zfs_dbgmsg("scan complete txg %llu", | |
3527 | (longlong_t)tx->tx_txg); | |
3528 | } | |
3529 | } else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) { | |
5e0bd0ae TC |
3530 | ASSERT(scn->scn_clearing); |
3531 | ||
d4a72f23 TC |
3532 | /* need to issue scrubbing IOs from per-vdev queues */ |
3533 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, | |
3534 | NULL, ZIO_FLAG_CANFAIL); | |
3535 | scan_io_queues_run(scn); | |
3536 | (void) zio_wait(scn->scn_zio_root); | |
3537 | scn->scn_zio_root = NULL; | |
3538 | ||
3539 | /* calculate and dprintf the current memory usage */ | |
3540 | (void) dsl_scan_should_clear(scn); | |
3541 | dsl_scan_update_stats(scn); | |
3542 | ||
3543 | zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums " | |
3544 | "(avg_block_size = %llu, avg_seg_size = %llu)", | |
3545 | (longlong_t)scn->scn_zios_this_txg, | |
3546 | (longlong_t)scn->scn_segs_this_txg, | |
3547 | (longlong_t)NSEC2MSEC(gethrtime() - | |
3548 | scn->scn_sync_start_time), | |
3549 | (longlong_t)scn->scn_avg_zio_size_this_txg, | |
3550 | (longlong_t)scn->scn_avg_seg_size_this_txg); | |
3551 | } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) { | |
3552 | /* Finished with everything. Mark the scrub as complete */ | |
3553 | zfs_dbgmsg("scan issuing complete txg %llu", | |
3554 | (longlong_t)tx->tx_txg); | |
3555 | ASSERT3U(scn->scn_done_txg, !=, 0); | |
3556 | ASSERT0(spa->spa_scrub_inflight); | |
3557 | ASSERT0(scn->scn_bytes_pending); | |
3558 | dsl_scan_done(scn, B_TRUE, tx); | |
3559 | sync_type = SYNC_MANDATORY; | |
428870ff | 3560 | } |
428870ff | 3561 | |
d4a72f23 | 3562 | dsl_scan_sync_state(scn, tx, sync_type); |
428870ff BB |
3563 | } |
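/*
 * Reading dsl_scan_sync() as a per-txg pipeline (a summary of the code
 * above, not additional behavior):
 *
 *	1. restart the scan if requested (dsl_scan_restarting());
 *	2. bail out unless this is sync pass 1 on a live, active pool;
 *	3. drain async destroys (dsl_process_async_destroys());
 *	4. either visit more metadata (dsl_scan_visit()) to fill the
 *	   queues, or issue from the queues (scan_io_queues_run()), or
 *	   mark the scan complete (dsl_scan_done());
 *	5. persist scan state (dsl_scan_sync_state()).
 */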
3564 | ||
428870ff | 3565 | static void |
d4a72f23 | 3566 | count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp) |
428870ff BB |
3567 | { |
3568 | int i; | |
3569 | ||
d4a72f23 TC |
3570 | /* update the spa's stats on how many bytes we have issued */ |
3571 | for (i = 0; i < BP_GET_NDVAS(bp); i++) { | |
3572 | atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued, | |
3573 | DVA_GET_ASIZE(&bp->blk_dva[i])); | |
3574 | } | |
3575 | ||
428870ff BB |
3576 | /* |
3577 | * If we resume after a reboot, zab will be NULL; don't record | |
3578 | * incomplete stats in that case. | |
3579 | */ | |
3580 | if (zab == NULL) | |
3581 | return; | |
3582 | ||
d4a72f23 TC |
3583 | mutex_enter(&zab->zab_lock); |
3584 | ||
428870ff BB |
3585 | for (i = 0; i < 4; i++) { |
3586 | int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; | |
3587 | int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; | |
9ae529ec CS |
3588 | |
3589 | if (t & DMU_OT_NEWTYPE) | |
3590 | t = DMU_OT_OTHER; | |
1c27024e DB |
3591 | zfs_blkstat_t *zb = &zab->zab_type[l][t]; |
3592 | int equal; | |
428870ff BB |
3593 | |
3594 | zb->zb_count++; | |
3595 | zb->zb_asize += BP_GET_ASIZE(bp); | |
3596 | zb->zb_lsize += BP_GET_LSIZE(bp); | |
3597 | zb->zb_psize += BP_GET_PSIZE(bp); | |
3598 | zb->zb_gangs += BP_COUNT_GANG(bp); | |
3599 | ||
3600 | switch (BP_GET_NDVAS(bp)) { | |
3601 | case 2: | |
3602 | if (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
3603 | DVA_GET_VDEV(&bp->blk_dva[1])) | |
3604 | zb->zb_ditto_2_of_2_samevdev++; | |
3605 | break; | |
3606 | case 3: | |
3607 | equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
3608 | DVA_GET_VDEV(&bp->blk_dva[1])) + | |
3609 | (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
3610 | DVA_GET_VDEV(&bp->blk_dva[2])) + | |
3611 | (DVA_GET_VDEV(&bp->blk_dva[1]) == | |
3612 | DVA_GET_VDEV(&bp->blk_dva[2])); | |
3613 | if (equal == 1) | |
3614 | zb->zb_ditto_2_of_3_samevdev++; | |
3615 | else if (equal == 3) | |
3616 | zb->zb_ditto_3_of_3_samevdev++; | |
3617 | break; | |
3618 | } | |
3619 | } | |
d4a72f23 TC |
3620 | |
3621 | mutex_exit(&zab->zab_lock); | |
428870ff BB |
3622 | } |
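/*
 * In the ditto accounting above, `equal' counts the pairwise vdev
 * matches among a 3-DVA block's copies, so the only reachable values
 * are 0 (all distinct), 1 (exactly one pair shares a vdev) and 3 (all
 * three on one vdev) -- any two matching pairs force the third. A
 * standalone check of that claim:
 *
 *	#include <assert.h>
 *
 *	static int
 *	pairwise_equal(int v0, int v1, int v2)
 *	{
 *		return ((v0 == v1) + (v0 == v2) + (v1 == v2));
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		assert(pairwise_equal(1, 2, 3) == 0);	// all distinct
 *		assert(pairwise_equal(1, 1, 3) == 1);	// one shared vdev
 *		assert(pairwise_equal(1, 1, 1) == 3);	// fully ditto
 *		return (0);
 *	}
 */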
3623 | ||
3624 | static void | |
d4a72f23 | 3625 | scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) |
428870ff | 3626 | { |
d4a72f23 TC |
3627 | avl_index_t idx; |
3628 | int64_t asize = sio->sio_asize; | |
3629 | dsl_scan_t *scn = queue->q_scn; | |
428870ff | 3630 | |
d4a72f23 | 3631 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); |
428870ff | 3632 | |
d4a72f23 TC |
3633 | if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { |
3634 | /* block is already scheduled for reading */ | |
3635 | atomic_add_64(&scn->scn_bytes_pending, -asize); | |
3636 | kmem_cache_free(sio_cache, sio); | |
3637 | return; | |
428870ff | 3638 | } |
d4a72f23 TC |
3639 | avl_insert(&queue->q_sios_by_addr, sio, idx); |
3640 | range_tree_add(queue->q_exts_by_addr, sio->sio_offset, asize); | |
428870ff BB |
3641 | } |
3642 | ||
d4a72f23 TC |
3643 | /* |
3644 | * Given all the info we got from our metadata scanning process, we | |
3645 | * construct a scan_io_t and insert it into the scan sorting queue. The | |
3646 | * I/O must already be suitable for us to process. This is controlled | |
3647 | * by dsl_scan_enqueue(). | |
3648 | */ | |
3649 | static void | |
3650 | scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, | |
3651 | int zio_flags, const zbookmark_phys_t *zb) | |
3d6da72d | 3652 | { |
d4a72f23 TC |
3653 | dsl_scan_t *scn = queue->q_scn; |
3654 | scan_io_t *sio = kmem_cache_alloc(sio_cache, KM_SLEEP); | |
3d6da72d | 3655 | |
d4a72f23 TC |
3656 | ASSERT0(BP_IS_GANG(bp)); |
3657 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
3d6da72d | 3658 | |
d4a72f23 TC |
3659 | bp2sio(bp, sio, dva_i); |
3660 | sio->sio_flags = zio_flags; | |
3661 | sio->sio_zb = *zb; | |
3d6da72d IH |
3662 | |
3663 | /* | |
d4a72f23 TC |
3664 | * Increment the bytes pending counter now so that we can't |
3665 | * get an integer underflow in case the worker processes the | |
3666 | * zio before we get to incrementing this counter. | |
3d6da72d | 3667 | */ |
d4a72f23 TC |
3668 | atomic_add_64(&scn->scn_bytes_pending, sio->sio_asize); |
3669 | ||
3670 | scan_io_queue_insert_impl(queue, sio); | |
3671 | } | |
3672 | ||
3673 | /* | |
3674 | * Given a set of I/O parameters as discovered by the metadata traversal | |
3675 | * process, attempts to place the I/O into the sorted queues (if allowed), | |
3676 | * or immediately executes the I/O. | |
3677 | */ | |
3678 | static void | |
3679 | dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, | |
3680 | const zbookmark_phys_t *zb) | |
3681 | { | |
3682 | spa_t *spa = dp->dp_spa; | |
3683 | ||
3684 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
3d6da72d IH |
3685 | |
3686 | /* | |
d4a72f23 TC |
3687 | * Gang blocks are hard to issue sequentially, so we just issue them |
3688 | * here immediately instead of queuing them. | |
3d6da72d | 3689 | */ |
d4a72f23 TC |
3690 | if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { |
3691 | scan_exec_io(dp, bp, zio_flags, zb, NULL); | |
3692 | return; | |
3693 | } | |
3d6da72d | 3694 | |
d4a72f23 TC |
3695 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
3696 | dva_t dva; | |
3697 | vdev_t *vdev; | |
3698 | ||
3699 | dva = bp->blk_dva[i]; | |
3700 | vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); | |
3701 | ASSERT(vdev != NULL); | |
3702 | ||
3703 | mutex_enter(&vdev->vdev_scan_io_queue_lock); | |
3704 | if (vdev->vdev_scan_io_queue == NULL) | |
3705 | vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); | |
3706 | ASSERT(dp->dp_scan != NULL); | |
3707 | scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, | |
3708 | i, zio_flags, zb); | |
3709 | mutex_exit(&vdev->vdev_scan_io_queue_lock); | |
3710 | } | |
3d6da72d IH |
3711 | } |
3712 | ||
428870ff BB |
3713 | static int |
3714 | dsl_scan_scrub_cb(dsl_pool_t *dp, | |
5dbd68a3 | 3715 | const blkptr_t *bp, const zbookmark_phys_t *zb) |
428870ff BB |
3716 | { |
3717 | dsl_scan_t *scn = dp->dp_scan; | |
428870ff BB |
3718 | spa_t *spa = dp->dp_spa; |
3719 | uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); | |
d4a72f23 | 3720 | size_t psize = BP_GET_PSIZE(bp); |
d6320ddb | 3721 | boolean_t needs_io = B_FALSE; |
572e2857 | 3722 | int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; |
428870ff | 3723 | |
00c405b4 | 3724 | |
428870ff | 3725 | if (phys_birth <= scn->scn_phys.scn_min_txg || |
863522b1 SN |
3726 | phys_birth >= scn->scn_phys.scn_max_txg) { |
3727 | count_block(scn, dp->dp_blkstats, bp); | |
428870ff | 3728 | return (0); |
863522b1 | 3729 | } |
428870ff | 3730 | |
00c405b4 MA |
3731 | /* Embedded BP's have phys_birth==0, so we reject them above. */ |
3732 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
9b67f605 | 3733 | |
428870ff BB |
3734 | ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); |
3735 | if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { | |
3736 | zio_flags |= ZIO_FLAG_SCRUB; | |
428870ff | 3737 | needs_io = B_TRUE; |
a117a6d6 GW |
3738 | } else { |
3739 | ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); | |
428870ff | 3740 | zio_flags |= ZIO_FLAG_RESILVER; |
428870ff BB |
3741 | needs_io = B_FALSE; |
3742 | } | |
3743 | ||
3744 | /* If it's an intent log block, failure is expected. */ | |
3745 | if (zb->zb_level == ZB_ZIL_LEVEL) | |
3746 | zio_flags |= ZIO_FLAG_SPECULATIVE; | |
3747 | ||
1c27024e | 3748 | for (int d = 0; d < BP_GET_NDVAS(bp); d++) { |
3d6da72d | 3749 | const dva_t *dva = &bp->blk_dva[d]; |
428870ff BB |
3750 | |
3751 | /* | |
3752 | * Keep track of how much data we've examined so that | |
3753 | * zpool(1M) status can make useful progress reports. | |
3754 | */ | |
3d6da72d IH |
3755 | scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva); |
3756 | spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva); | |
428870ff BB |
3757 | |
3758 | /* if it's a resilver, this may not be in the target range */ | |
3d6da72d IH |
3759 | if (!needs_io) |
3760 | needs_io = dsl_scan_need_resilver(spa, dva, psize, | |
3761 | phys_birth); | |
428870ff BB |
3762 | } |
3763 | ||
3764 | if (needs_io && !zfs_no_scrub_io) { | |
d4a72f23 TC |
3765 | dsl_scan_enqueue(dp, bp, zio_flags, zb); |
3766 | } else { | |
3767 | count_block(scn, dp->dp_blkstats, bp); | |
3768 | } | |
3769 | ||
3770 | /* do not relocate this block */ | |
3771 | return (0); | |
3772 | } | |
3773 | ||
3774 | static void | |
3775 | dsl_scan_scrub_done(zio_t *zio) | |
3776 | { | |
3777 | spa_t *spa = zio->io_spa; | |
3778 | blkptr_t *bp = zio->io_bp; | |
3779 | dsl_scan_io_queue_t *queue = zio->io_private; | |
3780 | ||
3781 | abd_free(zio->io_abd); | |
3782 | ||
3783 | if (queue == NULL) { | |
3784 | mutex_enter(&spa->spa_scrub_lock); | |
3785 | ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); | |
3786 | spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); | |
3787 | cv_broadcast(&spa->spa_scrub_io_cv); | |
3788 | mutex_exit(&spa->spa_scrub_lock); | |
3789 | } else { | |
3790 | mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); | |
3791 | ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); | |
3792 | queue->q_inflight_bytes -= BP_GET_PSIZE(bp); | |
3793 | cv_broadcast(&queue->q_zio_cv); | |
3794 | mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); | |
3795 | } | |
3796 | ||
3797 | if (zio->io_error && (zio->io_error != ECKSUM || | |
3798 | !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { | |
3799 | atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors); | |
3800 | } | |
3801 | } | |
428870ff | 3802 | |
d4a72f23 TC |
3803 | /* |
3804 | * Given a scanning zio's information, executes the zio. The zio need | |
3805 | * not be sortable; this function simply executes the zio, no matter | |
3806 | * what it is. The optional queue argument allows the caller to specify | |
3807 | * that they want per-top-level-vdev I/O rate limiting instead of the | |
3808 | * legacy global limiting. | |
3809 | */ | |
3810 | static void | |
3811 | scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, | |
3812 | const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue) | |
3813 | { | |
3814 | spa_t *spa = dp->dp_spa; | |
3815 | dsl_scan_t *scn = dp->dp_scan; | |
3816 | size_t size = BP_GET_PSIZE(bp); | |
3817 | abd_t *data = abd_alloc_for_io(size, B_FALSE); | |
3818 | ||
f90a30ad BB |
3819 | ASSERT3U(scn->scn_maxinflight_bytes, >, 0); |
3820 | ||
d4a72f23 | 3821 | if (queue == NULL) { |
428870ff | 3822 | mutex_enter(&spa->spa_scrub_lock); |
d4a72f23 | 3823 | while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes) |
428870ff | 3824 | cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); |
d4a72f23 | 3825 | spa->spa_scrub_inflight += BP_GET_PSIZE(bp); |
428870ff | 3826 | mutex_exit(&spa->spa_scrub_lock); |
d4a72f23 TC |
3827 | } else { |
3828 | kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; | |
428870ff | 3829 | |
d4a72f23 TC |
3830 | mutex_enter(q_lock); |
3831 | while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes) | |
3832 | cv_wait(&queue->q_zio_cv, q_lock); | |
3833 | queue->q_inflight_bytes += BP_GET_PSIZE(bp); | |
3834 | mutex_exit(q_lock); | |
3835 | } | |
3836 | ||
3837 | count_block(scn, dp->dp_blkstats, bp); | |
3838 | zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size, | |
3839 | dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); | |
3840 | } | |
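/*
 * The wait loops above are the usual condition-variable backpressure
 * pattern, with dsl_scan_scrub_done() as the matching release side. A
 * minimal pthread analog of the pair, for `size' bytes of I/O (a
 * sketch of the pattern, not the kernel primitives):
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
 *	static uint64_t inflight, max_inflight = 32 << 20;
 *
 *	// acquire side, before issuing the I/O
 *	pthread_mutex_lock(&lock);
 *	while (inflight >= max_inflight)
 *		pthread_cond_wait(&cv, &lock);
 *	inflight += size;
 *	pthread_mutex_unlock(&lock);
 *
 *	// release side, from the I/O completion callback
 *	pthread_mutex_lock(&lock);
 *	inflight -= size;
 *	pthread_cond_broadcast(&cv);
 *	pthread_mutex_unlock(&lock);
 */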
572e2857 | 3841 | |
d4a72f23 TC |
3842 | /* |
3843 | * This is the primary extent sorting algorithm. We balance two parameters: | |
3844 | * 1) how many bytes of I/O are in an extent | |
3845 | * 2) how well the extent is filled with I/O (as a fraction of its total size) | |
3846 | * Since we allow extents to have gaps between their constituent I/Os, it's | |
3847 | * possible to have a fairly large extent that contains the same amount of | |
3848 | * I/O bytes as a much smaller extent, which simply packs the I/O more tightly. | |
3849 | * The algorithm sorts based on a score calculated from the extent's size, | |
3850 | * the relative fill volume (in %) and a "fill weight" parameter that controls | |
3851 | * the split between whether we prefer larger extents or more well populated | |
3852 | * extents: | |
3853 | * | |
3854 | * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) | |
3855 | * | |
3856 | * Example: | |
3857 | * 1) assume extsz = 64 MiB | |
3858 | * 2) assume fill = 32 MiB (extent is half full) | |
3859 | * 3) assume fill_weight = 3 | |
3860 | * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100 | |
3861 | * SCORE = 32M + (50 * 3 * 32M) / 100 | |
3862 | * SCORE = 32M + (4800M / 100) | |
3863 | * SCORE = 32M + 48M | |
3864 | * ^ ^ | |
3865 | * | +--- final total relative fill-based score | |
3866 | * +--------- final total fill-based score | |
3867 | * SCORE = 80M | |
3868 | * | |
3869 | * As can be seen, at fill_weight=3, the algorithm is slightly biased towards | |
3870 | * extents that are more completely filled (in a 3:2 ratio) vs just larger. | |
3871 | * Note that as an optimization, we replace multiplication and division by | |
3872 | * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). | |
3873 | */ | |
3874 | static int | |
3875 | ext_size_compare(const void *x, const void *y) | |
3876 | { | |
3877 | const range_seg_t *rsa = x, *rsb = y; | |
3878 | uint64_t sa = rsa->rs_end - rsa->rs_start, | |
3879 | sb = rsb->rs_end - rsb->rs_start; | |
3880 | uint64_t score_a, score_b; | |
3881 | ||
3882 | score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) * | |
3883 | fill_weight * rsa->rs_fill) >> 7); | |
3884 | score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) * | |
3885 | fill_weight * rsb->rs_fill) >> 7); | |
3886 | ||
3887 | if (score_a > score_b) | |
3888 | return (-1); | |
3889 | if (score_a == score_b) { | |
3890 | if (rsa->rs_start < rsb->rs_start) | |
3891 | return (-1); | |
3892 | if (rsa->rs_start == rsb->rs_start) | |
3893 | return (0); | |
3894 | return (1); | |
428870ff | 3895 | } |
d4a72f23 TC |
3896 | return (1); |
3897 | } | |
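/*
 * The worked example in the comment above can be verified with a few
 * lines of standalone C; the shift by 7 mirrors the in-kernel
 * replacement for multiplying and dividing by 100, and fill_weight = 3
 * as in the example:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t sz = 64ULL << 20;	// 64 MiB extent
 *		uint64_t fill = 32ULL << 20;	// 32 MiB of queued I/O
 *		uint64_t w = 3;			// fill_weight
 *		uint64_t score =
 *		    fill + ((((fill << 7) / sz) * w * fill) >> 7);
 *
 *		printf("%llu MiB\n",
 *		    (unsigned long long)(score >> 20));	// prints 80 MiB
 *		return (0);
 *	}
 *
 * Step by step: (32M << 7) / 64M = 64, 64 * 3 * 32M = 6144M, and
 * 6144M >> 7 = 48M, so score = 32M + 48M = 80M, matching the example.
 */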
428870ff | 3898 | |
d4a72f23 TC |
3899 | /* |
3900 | * Comparator for the q_sios_by_addr tree. Sorting is simply performed | |
3901 | * based on LBA-order (from lowest to highest). | |
3902 | */ | |
3903 | static int | |
3904 | sio_addr_compare(const void *x, const void *y) | |
3905 | { | |
3906 | const scan_io_t *a = x, *b = y; | |
3907 | ||
3908 | if (a->sio_offset < b->sio_offset) | |
3909 | return (-1); | |
3910 | if (a->sio_offset == b->sio_offset) | |
3911 | return (0); | |
3912 | return (1); | |
3913 | } | |
3914 | ||
3915 | /* IO queues are created on demand when they are needed. */ | |
3916 | static dsl_scan_io_queue_t * | |
3917 | scan_io_queue_create(vdev_t *vd) | |
3918 | { | |
3919 | dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; | |
3920 | dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP); | |
3921 | ||
3922 | q->q_scn = scn; | |
3923 | q->q_vd = vd; | |
3924 | cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); | |
3925 | q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops, | |
a1d477c2 | 3926 | &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap); |
d4a72f23 TC |
3927 | avl_create(&q->q_sios_by_addr, sio_addr_compare, |
3928 | sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); | |
3929 | ||
3930 | return (q); | |
428870ff BB |
3931 | } |
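/*
 * The queue created above keeps the same pending work indexed three
 * ways (a recap of the fields initialized here, not new state):
 *
 *	q_sios_by_addr	AVL tree of individual scan_io_t's in LBA order
 *			(sio_addr_compare); used for gathering and for
 *			deduplicating inserts.
 *	q_exts_by_addr	range tree of extents covering those sios, with
 *			neighbors merged while the gap between them is
 *			at most zfs_scan_max_ext_gap.
 *	q_exts_by_size	AVL view of the same extents ordered by the
 *			fill-based score (ext_size_compare); consulted
 *			when picking the next extent to issue.
 *
 * A single call such as
 *
 *	scan_io_queue_insert(q, bp, 0, ZIO_FLAG_SCRUB, zb);
 *
 * therefore adds one node to q_sios_by_addr and extends (or creates)
 * the covering extent in q_exts_by_addr, which in turn reorders
 * q_exts_by_size.
 */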
3932 | ||
0ea05c64 | 3933 | /* |
d4a72f23 TC |
3934 | * Destroys a scan queue and all segments and scan_io_t's contained in it. |
3935 | * No further execution of I/O occurs, anything pending in the queue is | |
3936 | * simply freed without being executed. | |
0ea05c64 | 3937 | */ |
d4a72f23 TC |
3938 | void |
3939 | dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue) | |
428870ff | 3940 | { |
d4a72f23 TC |
3941 | dsl_scan_t *scn = queue->q_scn; |
3942 | scan_io_t *sio; | |
3943 | void *cookie = NULL; | |
3944 | int64_t bytes_dequeued = 0; | |
3945 | ||
3946 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
3947 | ||
3948 | while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != | |
3949 | NULL) { | |
3950 | ASSERT(range_tree_contains(queue->q_exts_by_addr, | |
3951 | sio->sio_offset, sio->sio_asize)); | |
3952 | bytes_dequeued += sio->sio_asize; | |
3953 | kmem_cache_free(sio_cache, sio); | |
3954 | } | |
428870ff | 3955 | |
d4a72f23 TC |
3956 | atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued); |
3957 | range_tree_vacate(queue->q_exts_by_addr, NULL, queue); | |
3958 | range_tree_destroy(queue->q_exts_by_addr); | |
3959 | avl_destroy(&queue->q_sios_by_addr); | |
3960 | cv_destroy(&queue->q_zio_cv); | |
428870ff | 3961 | |
d4a72f23 TC |
3962 | kmem_free(queue, sizeof (*queue)); |
3963 | } | |
0ea05c64 | 3964 | |
d4a72f23 TC |
3965 | /* |
3966 | * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is | |
3967 | * called on behalf of vdev_top_transfer when creating or destroying | |
3968 | * a mirror vdev due to zpool attach/detach. | |
3969 | */ | |
3970 | void | |
3971 | dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd) | |
3972 | { | |
3973 | mutex_enter(&svd->vdev_scan_io_queue_lock); | |
3974 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
3975 | ||
3976 | VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL); | |
3977 | tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue; | |
3978 | svd->vdev_scan_io_queue = NULL; | |
a1d477c2 | 3979 | if (tvd->vdev_scan_io_queue != NULL) |
d4a72f23 | 3980 | tvd->vdev_scan_io_queue->q_vd = tvd; |
0ea05c64 | 3981 | |
d4a72f23 TC |
3982 | mutex_exit(&tvd->vdev_scan_io_queue_lock); |
3983 | mutex_exit(&svd->vdev_scan_io_queue_lock); | |
428870ff | 3984 | } |
c409e464 | 3985 | |
d4a72f23 TC |
3986 | static void |
3987 | scan_io_queues_destroy(dsl_scan_t *scn) | |
784d15c1 | 3988 | { |
d4a72f23 TC |
3989 | vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; |
3990 | ||
3991 | for (uint64_t i = 0; i < rvd->vdev_children; i++) { | |
3992 | vdev_t *tvd = rvd->vdev_child[i]; | |
3993 | ||
3994 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
3995 | if (tvd->vdev_scan_io_queue != NULL) | |
3996 | dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue); | |
3997 | tvd->vdev_scan_io_queue = NULL; | |
3998 | mutex_exit(&tvd->vdev_scan_io_queue_lock); | |
3999 | } | |
784d15c1 NR |
4000 | } |
4001 | ||
d4a72f23 TC |
4002 | static void |
4003 | dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) | |
4004 | { | |
4005 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
4006 | dsl_scan_t *scn = dp->dp_scan; | |
4007 | vdev_t *vdev; | |
4008 | kmutex_t *q_lock; | |
4009 | dsl_scan_io_queue_t *queue; | |
4010 | scan_io_t srch, *sio; | |
4011 | avl_index_t idx; | |
4012 | uint64_t start, size; | |
4013 | ||
4014 | vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i])); | |
4015 | ASSERT(vdev != NULL); | |
4016 | q_lock = &vdev->vdev_scan_io_queue_lock; | |
4017 | queue = vdev->vdev_scan_io_queue; | |
4018 | ||
4019 | mutex_enter(q_lock); | |
4020 | if (queue == NULL) { | |
4021 | mutex_exit(q_lock); | |
4022 | return; | |
4023 | } | |
4024 | ||
4025 | bp2sio(bp, &srch, dva_i); | |
4026 | start = srch.sio_offset; | |
4027 | size = srch.sio_asize; | |
4028 | ||
4029 | /* | |
4030 | * We can find the zio in two states: | |
4031 | * 1) Cold, just sitting in the queue of zio's to be issued at | |
4032 | * some point in the future. In this case, all we do is | |
4033 | * remove the zio from the q_sios_by_addr tree, decrement | |
4034 | * its data volume from the containing range_seg_t and | |
4035 | * resort the q_exts_by_size tree to reflect that the | |
4036 | * range_seg_t has lost some of its 'fill'. We don't shorten | |
4037 | * the range_seg_t - this is usually rare enough not to be | |
4038 | * worth the extra hassle of trying to keep track of precise | |
4039 | * extent boundaries. | |
4040 | * 2) Hot, where the zio is currently in-flight in | |
4041 | * scan_io_queue_issue(). In this case, we can't simply | |
4042 | * reach in and stop the in-flight zio's, so we instead | |
4043 | * block the caller. Eventually, scan_io_queue_issue() will | |
4044 | * be done with issuing the zio's it gathered and will | |
4045 | * signal us. | |
4046 | */ | |
4047 | sio = avl_find(&queue->q_sios_by_addr, &srch, &idx); | |
4048 | if (sio != NULL) { | |
4049 | int64_t asize = sio->sio_asize; | |
4050 | blkptr_t tmpbp; | |
4051 | ||
4052 | /* Got it while it was cold in the queue */ | |
4053 | ASSERT3U(start, ==, sio->sio_offset); | |
4054 | ASSERT3U(size, ==, asize); | |
4055 | avl_remove(&queue->q_sios_by_addr, sio); | |
c409e464 | 4056 | |
d4a72f23 TC |
4057 | ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); |
4058 | range_tree_remove_fill(queue->q_exts_by_addr, start, size); | |
4059 | ||
4060 | /* | |
4061 | * We only update scn_bytes_pending in the cold path, | |
4062 | * otherwise it will already have been accounted for as | |
4063 | * part of the zio's execution. | |
4064 | */ | |
4065 | atomic_add_64(&scn->scn_bytes_pending, -asize); | |
c409e464 | 4066 | |
d4a72f23 TC |
4067 | /* count the block as though we issued it */ |
4068 | sio2bp(sio, &tmpbp, dva_i); | |
4069 | count_block(scn, dp->dp_blkstats, &tmpbp); | |
c409e464 | 4070 | |
d4a72f23 TC |
4071 | kmem_cache_free(sio_cache, sio); |
4072 | } | |
4073 | mutex_exit(q_lock); | |
4074 | } | |
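/*
 * Cold-path bookkeeping in one place: removing an asize-byte sio must
 * mirror everything scan_io_queue_insert() did (a recap of the code
 * above):
 *
 *	avl_remove(&queue->q_sios_by_addr, sio);	// undo the index insert
 *	range_tree_remove_fill(queue->q_exts_by_addr,
 *	    start, size);				// undo the extent fill
 *	atomic_add_64(&scn->scn_bytes_pending, -asize);	// undo accounting
 *	count_block(scn, dp->dp_blkstats, &tmpbp);	// report as issued
 */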
c409e464 | 4075 | |
d4a72f23 TC |
4076 | /* |
4077 | * Callback invoked when a zio_free() zio is executing. This needs to be | |
4078 | * intercepted to prevent a portion of disk space from being freed and | |
4079 | * then reallocated and overwritten while we still have the corresponding | |
4080 | * sio queued up for processing. | |
4081 | */ | |
4082 | void | |
4083 | dsl_scan_freed(spa_t *spa, const blkptr_t *bp) | |
4084 | { | |
4085 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
4086 | dsl_scan_t *scn = dp->dp_scan; | |
4087 | ||
4088 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
4089 | ASSERT(scn != NULL); | |
4090 | if (!dsl_scan_is_running(scn)) | |
4091 | return; | |
4092 | ||
4093 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) | |
4094 | dsl_scan_freed_dva(spa, bp, i); | |
4095 | } | |
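
The call site for this hook is not in this file; to the best of our reading it sits in the zio free path in zio.c (alongside metaslab_check_free() and arc_freed()). The stand-in program below, with all toy_* names hypothetical, only illustrates the ordering requirement the comment above describes: the scanner must be notified before the freed space can be reused.

#include <stdio.h>

typedef struct toy_blkptr { unsigned long long birth; } toy_blkptr_t;

static int scan_running = 1;

/* Stand-in for dsl_scan_freed(): forget any queued I/O for this block. */
static void
toy_dsl_scan_freed(const toy_blkptr_t *bp)
{
	if (!scan_running)
		return;
	printf("unqueued block born in txg %llu\n", bp->birth);
}

/* Stand-in for the free path: notify the scanner before releasing space. */
static void
toy_zio_free(const toy_blkptr_t *bp)
{
	toy_dsl_scan_freed(bp);	/* must precede the actual deallocation */
	/* the actual deallocation would follow here */
}

int
main(void)
{
	toy_blkptr_t bp = { .birth = 42 };
	toy_zio_free(&bp);
	return (0);
}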
4096 | ||
93ce2b4c | 4097 | #if defined(_KERNEL) |
d4a72f23 TC |
4098 | /* CSTYLED */ |
4099 | module_param(zfs_scan_vdev_limit, ulong, 0644); | |
4100 | MODULE_PARM_DESC(zfs_scan_vdev_limit, | |
4101 | "Max bytes in flight per leaf vdev for scrubs and resilvers"); | |
4102 | ||
4103 | module_param(zfs_scrub_min_time_ms, int, 0644); | |
4104 | MODULE_PARM_DESC(zfs_scrub_min_time_ms, "Min millisecs to scrub per txg"); | |
c409e464 | 4105 | |
a1d477c2 MA |
4106 | module_param(zfs_obsolete_min_time_ms, int, 0644); |
4107 | MODULE_PARM_DESC(zfs_obsolete_min_time_ms, "Min millisecs to obsolete per txg"); | |
4108 | ||
c409e464 BB |
4109 | module_param(zfs_free_min_time_ms, int, 0644); |
4110 | MODULE_PARM_DESC(zfs_free_min_time_ms, "Min millisecs to free per txg"); | |
4111 | ||
4112 | module_param(zfs_resilver_min_time_ms, int, 0644); | |
4113 | MODULE_PARM_DESC(zfs_resilver_min_time_ms, "Min millisecs to resilver per txg"); | |
4114 | ||
cef48f14 TC |
4115 | module_param(zfs_scan_suspend_progress, int, 0644); |
4116 | MODULE_PARM_DESC(zfs_scan_suspend_progress, | |
4117 | "Set to prevent scans from progressing"); | |
4118 | ||
c409e464 BB |
4119 | module_param(zfs_no_scrub_io, int, 0644); |
4120 | MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O"); | |
4121 | ||
4122 | module_param(zfs_no_scrub_prefetch, int, 0644); | |
4123 | MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching"); | |
36283ca2 | 4124 | |
02730c33 | 4125 | /* CSTYLED */ |
a1d477c2 MA |
4126 | module_param(zfs_async_block_max_blocks, ulong, 0644); |
4127 | MODULE_PARM_DESC(zfs_async_block_max_blocks, | |
4128 | "Max number of blocks freed in one txg"); | |
ba5ad9a4 GW |
4129 | |
4130 | module_param(zfs_free_bpobj_enabled, int, 0644); | |
4131 | MODULE_PARM_DESC(zfs_free_bpobj_enabled, "Enable processing of the free_bpobj"); | |
d4a72f23 TC |
4132 | |
4133 | module_param(zfs_scan_mem_lim_fact, int, 0644); | |
4134 | MODULE_PARM_DESC(zfs_scan_mem_lim_fact, "Fraction of RAM for scan hard limit"); | |
4135 | ||
4136 | module_param(zfs_scan_issue_strategy, int, 0644); | |
4137 | MODULE_PARM_DESC(zfs_scan_issue_strategy, | |
4138 | "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size"); | |
4139 | ||
4140 | module_param(zfs_scan_legacy, int, 0644); | |
4141 | MODULE_PARM_DESC(zfs_scan_legacy, "Scrub using legacy non-sequential method"); | |
4142 | ||
4143 | module_param(zfs_scan_checkpoint_intval, int, 0644); | |
4144 | MODULE_PARM_DESC(zfs_scan_checkpoint_intval, | |
4145 | "Scan progress on-disk checkpointing interval"); | |
4146 | ||
63f88c12 | 4147 | /* CSTYLED */ |
4148 | module_param(zfs_scan_max_ext_gap, ulong, 0644); | |
4149 | MODULE_PARM_DESC(zfs_scan_max_ext_gap, | |
4150 | "Max gap in bytes between sequential scrub / resilver I/Os"); | |
4151 | ||
d4a72f23 TC |
4152 | module_param(zfs_scan_mem_lim_soft_fact, int, 0644); |
4153 | MODULE_PARM_DESC(zfs_scan_mem_lim_soft_fact, | |
4154 | "Fraction of hard limit used as soft limit"); | |
4155 | ||
4156 | module_param(zfs_scan_strict_mem_lim, int, 0644); | |
4157 | MODULE_PARM_DESC(zfs_scan_strict_mem_lim, | |
4158 | "Tunable to attempt to reduce lock contention"); | |
4159 | ||
4160 | module_param(zfs_scan_fill_weight, int, 0644); | |
4161 | MODULE_PARM_DESC(zfs_scan_fill_weight, | |
4162 | "Tunable to adjust bias towards more filled segments during scans"); | |
80a91e74 TC |
4163 | |
4164 | module_param(zfs_resilver_disable_defer, int, 0644); | |
4165 | MODULE_PARM_DESC(zfs_resilver_disable_defer, | |
4166 | "Process all resilvers immediately"); | |
c409e464 | 4167 | #endif |
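
Because each parameter above is registered with mode 0644, it is exposed read-write under /sys/module/zfs/parameters/ on Linux and can be adjusted at runtime. A minimal userland sketch (a hypothetical standalone program, not part of this file; writing requires root):

#include <stdio.h>

int
main(void)
{
	/*
	 * Equivalent to:
	 * echo 2000 > /sys/module/zfs/parameters/zfs_scrub_min_time_ms
	 */
	FILE *f = fopen(
	    "/sys/module/zfs/parameters/zfs_scrub_min_time_ms", "w");
	if (f == NULL) {
		perror("fopen");
		return (1);
	}
	fprintf(f, "%d\n", 2000);
	fclose(f);
	return (0);
}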