/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/Os issued, since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */

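/*
 * Illustrative sketch only (not part of the real control flow, and not
 * compiled): the per-sync dispatch described above reduces to roughly the
 * shape below. The actual decision is made in dsl_scan_sync(), which also
 * accounts for suspension, pausing and checkpointing; "done_with_metadata"
 * is a hypothetical stand-in for the state the scan tracks internally.
 */
#if 0
	if (done_with_metadata || dsl_scan_should_clear(scn))
		scan_io_queues_run(scn);	/* issue the sorted I/Os */
	else
		dsl_scan_visit(scn, tx);	/* keep scanning metadata */
#endif
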
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_leaves(vdev_t *vd);

extern int zfs_vdev_async_write_active_min_dirty_percent;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of concurrently in-flight bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues, which causes long latency
 * and hence long txg sync times. No matter what, we will not overload the
 * drives with I/O, since that is protected by zfs_vdev_scrub_max_active.
 */
unsigned long zfs_scan_vdev_limit = 4 << 20;

int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
unsigned long zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */
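
/*
 * Worked example (illustrative, not from the source): with the defaults
 * above on a system with 8 GiB of physmem, dsl_scan_should_clear()
 * computes a hard limit of MAX(8 GiB / 20, 16 MiB) = ~410 MiB and a
 * soft limit of 410 MiB - MIN(410 MiB / 20, 128 MiB) = ~390 MiB.
 */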

int zfs_scrub_min_time_ms = 1000;	/* min millisecs to scrub per txg */
int zfs_obsolete_min_time_ms = 500;	/* min millisecs to obsolete per txg */
int zfs_free_min_time_ms = 1000;	/* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000;	/* min millisecs to resilver per txg */
int zfs_scan_checkpoint_intval = 7200;	/* in seconds */
int zfs_no_scrub_io = B_FALSE;		/* set to disable scrub i/o */
int zfs_no_scrub_prefetch = B_FALSE;	/* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
unsigned long zfs_async_block_max_blocks = 100000;

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

/*
 * Enable/disable the processing of the free_bpobj object.
 */
int zfs_free_bpobj_enabled = 1;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

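/*
 * Illustrative note (mirrors the call made later in this file): the scan
 * callback is dispatched through this table, so invoking the scrub or
 * resilver callback for a block looks like:
 *
 *	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
 */
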
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_offset;
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_asize;

	/* fields from zio_t */
	int			sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node;	/* link into issuing queue */
		list_node_t	sio_list_node;	/* link for issuing to disk */
	} sio_nodes;
} scan_io_t;
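
/*
 * Illustrative sizing note (rough figures, not from the source): on a
 * 64-bit system the fields above pack into roughly 128 bytes per queued
 * block, an order of magnitude smaller than a full zio_t, which is what
 * makes it feasible to hold millions of queued scan I/Os in memory.
 */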

struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
	vdev_t		*q_vd; /* top-level vdev that this queue represents */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	avl_tree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};

/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache;

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	sio_cache = kmem_cache_create("sio_cache",
	    sizeof (scan_io_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
scan_fini(void)
{
	kmem_cache_destroy(sio_cache);
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp, uint64_t vdev_id)
{
	bzero(bp, sizeof (*bp));
	DVA_SET_ASIZE(&bp->blk_dva[0], sio->sio_asize);
	DVA_SET_VDEV(&bp->blk_dva[0], vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], sio->sio_offset);
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	/* we discard the vdev id, since we can deduce it from the queue */
	sio->sio_offset = DVA_GET_OFFSET(&bp->blk_dva[dva_i]);
	sio->sio_asize = DVA_GET_ASIZE(&bp->blk_dva[dva_i]);
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
}

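/*
 * Illustrative round trip (hypothetical usage, not compiled): a block
 * pointer is condensed into a scan_io_t when it is queued and
 * reconstituted just before the scrub zio is issued. The vdev id dropped
 * by bp2sio() is implied by the per-vdev queue the sio is held on.
 */
#if 0
	scan_io_t sio;
	blkptr_t issue_bp;

	bp2sio(bp, &sio, 0);			/* condense DVA 0 of *bp */
	sio2bp(&sio, &issue_bp, vdev_id);	/* rebuild bp for issue */
#endif
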
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB). Limits for the issuing
	 * phase are done per top-level vdev and are handled separately.
	 */
	scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
	    dsl_scan_count_leaves(spa->spa_root_vdev), 1ULL << 20);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		/*
		 * Detect if the pool contains the signature of #2094. If it
		 * does, properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				bcopy(zaptmp, &scn->scn_phys,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    (longlong_t)scn->scn_restart_txg);
		}
	}

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);
		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
	if (scn->scn_bytes_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint");

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn))
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, NULL,
			    ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (ECANCELED);
		}

		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE);

			spa_event_notify(spa, NULL, NULL,
			    scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_scan_is_running(scn));
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* note the time at which this pause began */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so.
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}


/* start a new scan, or restart an existing one. */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
}

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}

static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;
	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}

static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}

static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}

static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}

static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_bytes_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}
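
/*
 * Illustrative note (derived from the code above, example values are
 * hypothetical): the persisted queue is a ZAP whose entries map a dataset
 * object number, used as the integer key, to the txg from which that
 * dataset must be scanned, e.g. dsobj 54 -> txg 1289.
 */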

/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
 */
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t mlim_hard, mlim_soft, mused;
	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
	    scn->scn_dp->dp_spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/* #extents in exts_by_size = # in exts_by_addr */
			mused += avl_numnodes(&queue->q_exts_by_size) *
			    sizeof (range_seg_t) +
			    avl_numnodes(&queue->q_sios_by_addr) *
			    sizeof (scan_io_t);
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_bytes_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}

static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
#ifdef ZFS_DEBUG
			dsl_scan_phys_t *scnp = &scn->scn_phys;
			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
		}
		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") may have been allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	ASSERT(spa_writeable(dp->dp_spa));

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0)
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg, B_FALSE);

	zil_free(zilog);
}

/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;

	return (zbookmark_compare(spc_a->spc_datablkszsec,
	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}

static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
		zfs_refcount_destroy(&spc->spc_refcnt);
		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
	}
}

static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
{
	scan_prefetch_ctx_t *spc;

	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
	zfs_refcount_create(&spc->spc_refcnt);
	zfs_refcount_add(&spc->spc_refcnt, tag);
	spc->spc_scn = scn;
	if (dnp != NULL) {
		spc->spc_datablkszsec = dnp->dn_datablkszsec;
		spc->spc_indblkshift = dnp->dn_indblkshift;
		spc->spc_root = B_FALSE;
	} else {
		spc->spc_datablkszsec = 0;
		spc->spc_indblkshift = 0;
		spc->spc_root = B_TRUE;
	}

	return (spc);
}

static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
{
	zfs_refcount_add(&spc->spc_refcnt, tag);
}
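
/*
 * A minimal sketch of the reference-counting lifecycle above, mirroring
 * how dsl_scan_prefetch_dnode() and dsl_scan_prefetch() actually use it:
 *
 *	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);	// refcnt == 1
 *	scan_prefetch_ctx_add_ref(spc, scn);		// refcnt == 2
 *	... queue prefetch I/Os that reference spc ...
 *	scan_prefetch_ctx_rele(spc, FTAG);		// refcnt == 1
 *	scan_prefetch_ctx_rele(spc, scn);		// refcnt == 0, freed
 *
 * The context is only freed once every holder (including each queued
 * prefetch) has dropped its tag.
 */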

static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
    const zbookmark_phys_t *zb)
{
	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
	dnode_phys_t tmp_dnp;
	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;

	if (zb->zb_objset != last_zb->zb_objset)
		return (B_TRUE);
	if ((int64_t)zb->zb_object < 0)
		return (B_FALSE);

	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;

	if (zbookmark_subtree_completed(dnp, zb, last_zb))
		return (B_TRUE);

	return (B_FALSE);
}

static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
	avl_index_t idx;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
		return;

	if (dsl_scan_check_prefetch_resume(spc, zb))
		return;

	scan_prefetch_ctx_add_ref(spc, scn);
	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
	spic->spic_spc = spc;
	spic->spic_bp = *bp;
	spic->spic_zb = *zb;

	/*
	 * Add the IO to the queue of blocks to prefetch. This allows us to
	 * prioritize blocks that we will need first for the main traversal
	 * thread.
	 */
	mutex_enter(&spa->spa_scrub_lock);
	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
		/* this block is already queued for prefetch */
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
		scan_prefetch_ctx_rele(spc, scn);
		mutex_exit(&spa->spa_scrub_lock);
		return;
	}

	avl_insert(&scn->scn_prefetch_queue, spic, idx);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int i;
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
		return;

	SET_BOOKMARK(&zb, objset, object, 0, 0);

	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);

	for (i = 0; i < dnp->dn_nblkptr; i++) {
		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
		zb.zb_blkid = i;
		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zb.zb_level = 0;
		zb.zb_blkid = DMU_SPILL_BLKID;
		dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
	}

	scan_prefetch_ctx_rele(spc, FTAG);
}

void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *private)
{
	scan_prefetch_ctx_t *spc = private;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;

	/* broadcast that the IO has completed for rate limiting purposes */
	mutex_enter(&spa->spa_scrub_lock);
	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);

	/* if there was an error or we are done prefetching, just cleanup */
	if (buf == NULL || scn->scn_prefetch_stop)
		goto out;

	if (BP_GET_LEVEL(bp) > 0) {
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t czb;

		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1, zb->zb_blkid * epb + i);
			dsl_scan_prefetch(spc, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_prefetch_dnode(scn, cdnp,
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		objset_phys_t *osp = buf->b_data;

		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
		    zb->zb_objset, DMU_META_DNODE_OBJECT);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_groupused_dnode, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_userused_dnode, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

out:
	if (buf != NULL)
		arc_buf_destroy(buf, private);
	scan_prefetch_ctx_rele(spc, scn);
}

/* ARGSUSED */
static void
dsl_scan_prefetch_thread(void *arg)
{
	dsl_scan_t *scn = arg;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	/* loop until we are told to stop */
	while (!scn->scn_prefetch_stop) {
		arc_flags_t flags = ARC_FLAG_NOWAIT |
		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;

		mutex_enter(&spa->spa_scrub_lock);

		/*
		 * Wait until we have an IO to issue and are not above our
		 * maximum in flight limit.
		 */
		while (!scn->scn_prefetch_stop &&
		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		}

		/* recheck if we should stop since we waited for the cv */
		if (scn->scn_prefetch_stop) {
			mutex_exit(&spa->spa_scrub_lock);
			break;
		}

		/* remove the prefetch IO from the tree */
		spic = avl_first(&scn->scn_prefetch_queue);
		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
		avl_remove(&scn->scn_prefetch_queue, spic);

		mutex_exit(&spa->spa_scrub_lock);

		if (BP_IS_PROTECTED(&spic->spic_bp)) {
			ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
			    BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
			ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
			zio_flags |= ZIO_FLAG_RAW;
		}

		/* issue the prefetch asynchronously */
		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);

		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}

	ASSERT(scn->scn_prefetch_stop);

	/* free any prefetches we didn't get to complete */
	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
		avl_remove(&scn->scn_prefetch_queue, spic);
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
	mutex_exit(&spa->spa_scrub_lock);
}
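
/*
 * Hedged sketch of how the thread above would be started and stopped from
 * syncing context; the actual dispatch/teardown site lives outside this
 * excerpt, so treat this as an assumption about the surrounding code:
 *
 *	scn->scn_prefetch_stop = B_FALSE;
 *	(void) taskq_dispatch(dp->dp_sync_taskq,
 *	    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
 *	... main traversal runs ...
 *	mutex_enter(&spa->spa_scrub_lock);
 *	scn->scn_prefetch_stop = B_TRUE;
 *	cv_broadcast(&spa->spa_scrub_io_cv);
 *	mutex_exit(&spa->spa_scrub_lock);
 *
 * The broadcast wakes the thread out of its cv_wait() loop so it can
 * observe scn_prefetch_stop and drain the remaining queue entries.
 */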

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for suspending
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}
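
/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose a scrub suspended at scn_bookmark = <objset 21, object 130,
 * level 0, blkid 7>. On the next pass, any bookmark whose subtree was
 * fully visited before that point (per zbookmark_subtree_completed())
 * is skipped outright. Once the traversal reaches exactly
 * <21, 130, 0, 7>, or jumps past it to object 131, the bookmark is
 * zeroed and suspension checks resume as normal.
 */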

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

/*
 * Return nonzero on i/o error.
 */
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		if (BP_IS_PROTECTED(bp)) {
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			zio_flags |= ZIO_FLAG_RAW;
		}

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group/project accounting
			 * objects, and never skip them, even if we are
			 * suspending. This is necessary so that the
			 * space deltas from this txg get integrated.
			 */
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				dsl_scan_visitdnode(scn, ds, osp->os_type,
				    &osp->os_projectused_dnode,
				    DMU_PROJECTUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	}

	return (0);
}

inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	blkptr_t *bp_toread = NULL;

	if (dsl_scan_check_suspend(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	scn->scn_visited_this_txg++;

	/*
	 * This debugging is commented out to conserve stack space. This
	 * function is called recursively and the debugging adds several
	 * bytes to the stack for each call. It can be commented back in
	 * if required to debug an issue in dsl_scan_visitbp().
	 *
	 * dprintf_bp(bp,
	 *    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	 *    ds, ds ? ds->ds_object : 0,
	 *    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	 *    bp);
	 */

	if (BP_IS_HOLE(bp)) {
		scn->scn_holes_this_txg++;
		return;
	}

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
		scn->scn_lt_min_this_txg++;
		return;
	}

	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	*bp_toread = *bp;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
		goto out;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		scn->scn_ddt_contained_this_txg++;
		goto out;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
		scn->scn_gt_max_this_txg++;
		goto out;
	}

	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);

out:
	kmem_free(bp_toread, sizeof (blkptr_t));
}
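
/*
 * Worked example of the txg filtering above (illustrative numbers): with
 * scn_cur_min_txg == 100 and scn_cur_max_txg == 200, a bp born at txg 90
 * is counted in scn_lt_min_this_txg and skipped (it was covered by an
 * earlier pass), a bp born at txg 150 is scanned now, and a bp born at
 * txg 250 is counted in scn_gt_max_this_txg and deferred to a later pass
 * over the dataset that owns it.
 */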

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
		    zb.zb_objset, 0, 0, 0);
	} else {
		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
	}

	scn->scn_objsets_visited_this_txg++;

	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
	dsl_scan_prefetch(spc, bp, &zb);
	scan_prefetch_ctx_rele(spc, FTAG);

	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about.  However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn_phys->scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn_phys->scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	}
}

/*
 * Invoked when a dataset is destroyed. We need to make sure that:
 *
 * 1) If it is the dataset that was currently being scanned, we write
 *	a new dsl_scan_phys_t, marking the objset reference in it
 *	as destroyed.
 * 2) Remove it from the work queue, if it was present.
 *
 * If the dataset was actually a snapshot, instead of marking the dataset
 * as destroyed, we instead substitute the next snapshot in line.
 */
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_destroyed_scn_phys(ds, &scn->scn_phys);
	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		if (ds->ds_is_snapshot)
			scan_ds_queue_insert(scn,
			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds->ds_object) {
		scn_bookmark->zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
}

/*
 * Called when a dataset is snapshotted. If we were currently traversing
 * this snapshot, we reset our bookmark to point at the newly created
 * snapshot. We also modify our work queue to remove the old snapshot and
 * replace with the new one.
 */
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
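
/*
 * Worked example (illustrative object numbers): a scrub is traversing
 * head dataset 100 when "zfs snapshot" creates snapshot object 150,
 * which becomes ds 100's ds_prev_snap_obj. The blocks visited so far
 * now belong to the snapshot, so the bookmark's zb_objset is rewritten
 * from 100 to 150, and any work-queue entry for 100 is requeued as 150
 * with the same mintxg.
 */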

static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
    zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds1->ds_object) {
		scn_bookmark->zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
		scn_bookmark->zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}
}

/*
 * Called when a parent dataset and its clone are swapped. If we were
 * currently traversing the dataset, we need to switch to traversing the
 * newly promoted parent.
 */
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds1->ds_object);
		scan_ds_queue_insert(scn, ds2->ds_object, mintxg);
	}
	if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds2->ds_object);
		scan_ds_queue_insert(scn, ds1->ds_object, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	}
	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	uint64_t originobj = *(uint64_t *)arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *		cur_min_txg is no less than ds_creation_txg.
		 *		We have already visited these blocks.
		 * or
		 *	birth > scn_max_txg
		 *		The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (!dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    (void) zap_cursor_advance(&zc)) {
				scan_ds_queue_insert(scn,
				    zfs_strtonum(za.za_name, NULL),
				    dsl_dataset_phys(ds)->ds_creation_txg);
			}
			zap_cursor_fini(&zc);
		} else {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &ds->ds_object,
			    DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };
	int p;

	if (!dsl_scan_is_running(scn))
		return;

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde;
	int error;
	uint64_t n = 0;

	bzero(&dde, sizeof (ddt_entry_t));

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
	    "suspending=%u", (longlong_t)n,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
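
/*
 * Worked example of the interaction described above (illustrative): a
 * block sits in DDT_CLASS_DUPLICATE with refcnt == 3 when the DDT phase
 * walks it, so it is scrubbed once there. If a later "zfs destroy" drops
 * its refcnt to 1 before the top-down phase reaches it, the top-down
 * phase scrubs it a second time (harmless). Conversely, a block that was
 * DDT_CLASS_UNIQUE during the DDT phase and gains a second reference
 * afterwards is caught by the ddt_sync_entry() notification and scrubbed
 * at the moment of the class transition.
 */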

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scan_ds_t *sds;
	dsl_pool_t *dp = scn->scn_dp;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_suspending)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_suspending)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, NULL, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_suspending);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
		/*
		 * If we were suspended, continue from here. Note if the
		 * ds we were suspended on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/*
	 * In case we suspended right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/*
	 * Keep pulling things out of the dataset avl queue. Updates to the
	 * persistent zap-object-as-queue happen only at checkpoints.
	 */
	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
		dsl_dataset_t *ds;
		uint64_t dsobj = sds->sds_dsobj;
		uint64_t txg = sds->sds_txg;

		/* dequeue and free the ds from the queue */
		scan_ds_queue_remove(scn, dsobj);
		sds = NULL;

		/* set up min / max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (txg != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg, txg);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/* No more objsets to fetch, we're done */
	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
	ASSERT0(scn->scn_suspending);
}

static uint64_t
dsl_scan_count_leaves(vdev_t *vd)
{
	uint64_t i, leaves = 0;

	/* we only count leaves that belong to the main pool and are readable */
	if (vd->vdev_islog || vd->vdev_isspare ||
	    vd->vdev_isl2cache || !vdev_readable(vd))
		return (0);

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (i = 0; i < vd->vdev_children; i++) {
		leaves += dsl_scan_count_leaves(vd->vdev_child[i]);
	}

	return (leaves);
}
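
/*
 * Illustrative arithmetic (assuming a zfs_scan_vdev_limit of 4 MiB per
 * leaf, which we believe is the default): a top-level raidz2 vdev with
 * 6 leaf disks yields dsl_scan_count_leaves() == 6, so
 * scan_io_queues_run_one() below would allow
 * MAX(6 * 4 MiB, 1 MiB) == 24 MiB of scan I/O in flight for that vdev's
 * queue during a given txg.
 */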

static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
	int i;
	uint64_t cur_size = 0;

	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
	}

	q->q_total_zio_size_this_txg += cur_size;
	q->q_zios_this_txg++;
}

static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
    uint64_t end)
{
	q->q_total_seg_size_this_txg += end - start;
	q->q_segs_this_txg++;
}

static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
	/* See comment in dsl_scan_check_suspend() */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	return ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

/*
 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
 * disk. This consumes the io_list and frees the scan_io_t's. This is
 * called when emptying queues, either when we're up against the memory
 * limit or when we have finished scanning. Returns B_TRUE if we stopped
 * processing the list before we finished. Any sios that were not issued
 * will remain in the io_list.
 */
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	int64_t bytes_issued = 0;
	boolean_t suspended = B_FALSE;

	while ((sio = list_head(io_list)) != NULL) {
		blkptr_t bp;

		if (scan_io_queue_check_suspend(scn)) {
			suspended = B_TRUE;
			break;
		}

		sio2bp(sio, &bp, queue->q_vd->vdev_id);
		bytes_issued += sio->sio_asize;
		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
		    &sio->sio_zb, queue);
		(void) list_remove_head(io_list);
		scan_io_queues_update_zio_stats(queue, &bp);
		kmem_cache_free(sio_cache, sio);
	}

	atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);

	return (suspended);
}

/*
 * This function removes sios from an IO queue which reside within a given
 * range_seg_t and inserts them (in offset order) into a list. Note that
 * we only ever return a maximum of 32 sios at once. If there are more sios
 * to process within this segment that did not make it onto the list we
 * return B_TRUE and otherwise B_FALSE.
 */
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
	scan_io_t srch_sio, *sio, *next_sio;
	avl_index_t idx;
	uint_t num_sios = 0;
	int64_t bytes_issued = 0;

	ASSERT(rs != NULL);
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	srch_sio.sio_offset = rs->rs_start;

	/*
	 * The exact start of the extent might not contain any matching zios,
	 * so if that's the case, examine the next one in the tree.
	 */
	sio = avl_find(&queue->q_sios_by_addr, &srch_sio, &idx);
	if (sio == NULL)
		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);

	while (sio != NULL && sio->sio_offset < rs->rs_end && num_sios <= 32) {
		ASSERT3U(sio->sio_offset, >=, rs->rs_start);
		ASSERT3U(sio->sio_offset + sio->sio_asize, <=, rs->rs_end);

		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
		avl_remove(&queue->q_sios_by_addr, sio);

		bytes_issued += sio->sio_asize;
		num_sios++;
		list_insert_tail(list, sio);
		sio = next_sio;
	}

	/*
	 * We limit the number of sios we process at once to 32 to avoid
	 * biting off more than we can chew. If we didn't take everything
	 * in the segment we update it to reflect the work we were able to
	 * complete. Otherwise, we remove it from the range tree entirely.
	 */
	if (sio != NULL && sio->sio_offset < rs->rs_end) {
		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
		    -bytes_issued);
		range_tree_resize_segment(queue->q_exts_by_addr, rs,
		    sio->sio_offset, rs->rs_end - sio->sio_offset);

		return (B_TRUE);
	} else {
		range_tree_remove(queue->q_exts_by_addr, rs->rs_start,
		    rs->rs_end - rs->rs_start);
		return (B_FALSE);
	}
}
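
/*
 * Worked example (illustrative offsets): given an extent covering
 * [0, 1 MiB) with 40 queued sios, the first call gathers the first
 * batch of ~32 sios, shrinks the extent so it begins at the first
 * ungathered sio's offset, and returns B_TRUE; the next call gathers
 * the remainder, removes the extent from q_exts_by_addr entirely, and
 * returns B_FALSE.
 */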

/*
 * This is called from the queue emptying thread and selects the next
 * extent from which we are to issue I/Os. The behavior of this function
 * depends on the state of the scan, the current memory consumption and
 * whether or not we are performing a scan shutdown.
 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
 *	needs to perform a checkpoint
 * 2) We select the largest available extent if we are up against the
 *	memory limit.
 * 3) Otherwise we don't select any extents.
 */
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
	ASSERT(scn->scn_is_sorted);

	/* handle tunable overrides */
	if (scn->scn_checkpointing || scn->scn_clearing) {
		if (zfs_scan_issue_strategy == 1) {
			return (range_tree_first(queue->q_exts_by_addr));
		} else if (zfs_scan_issue_strategy == 2) {
			return (avl_first(&queue->q_exts_by_size));
		}
	}

	/*
	 * During normal clearing, we want to issue our largest segments
	 * first, keeping IO as sequential as possible, and leaving the
	 * smaller extents for later with the hope that they might eventually
	 * grow to larger sequential segments. However, when the scan is
	 * checkpointing, no new extents will be added to the sorting queue,
	 * so the way we are sorted now is as good as it will ever get.
	 * In this case, we instead switch to issuing extents in LBA order.
	 */
	if (scn->scn_checkpointing) {
		return (range_tree_first(queue->q_exts_by_addr));
	} else if (scn->scn_clearing) {
		return (avl_first(&queue->q_exts_by_size));
	} else {
		return (NULL);
	}
}
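
/*
 * Summary of the selection logic above, keyed on the
 * zfs_scan_issue_strategy tunable (0 is the automatic behavior):
 *
 *	strategy 0: checkpointing -> LBA order (range_tree_first),
 *	            clearing      -> largest-first (q_exts_by_size),
 *	            otherwise     -> NULL (nothing to issue yet)
 *	strategy 1: always LBA order while checkpointing or clearing
 *	strategy 2: always largest-first while checkpointing or clearing
 */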
2717 | ||
2718 | static void | |
2719 | scan_io_queues_run_one(void *arg) | |
2720 | { | |
2721 | dsl_scan_io_queue_t *queue = arg; | |
2722 | kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; | |
2723 | boolean_t suspended = B_FALSE; | |
2724 | range_seg_t *rs = NULL; | |
2725 | scan_io_t *sio = NULL; | |
2726 | list_t sio_list; | |
2727 | uint64_t bytes_per_leaf = zfs_scan_vdev_limit; | |
2728 | uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd); | |
2729 | ||
2730 | ASSERT(queue->q_scn->scn_is_sorted); | |
2731 | ||
2732 | list_create(&sio_list, sizeof (scan_io_t), | |
2733 | offsetof(scan_io_t, sio_nodes.sio_list_node)); | |
2734 | mutex_enter(q_lock); | |
2735 | ||
2736 | /* calculate maximum in-flight bytes for this txg (min 1MB) */ | |
2737 | queue->q_maxinflight_bytes = | |
2738 | MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); | |
2739 | ||
2740 | /* reset per-queue scan statistics for this txg */ | |
2741 | queue->q_total_seg_size_this_txg = 0; | |
2742 | queue->q_segs_this_txg = 0; | |
2743 | queue->q_total_zio_size_this_txg = 0; | |
2744 | queue->q_zios_this_txg = 0; | |
2745 | ||
2746 | /* loop until we run out of time or sios */ | |
2747 | while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) { | |
2748 | uint64_t seg_start = 0, seg_end = 0; | |
2749 | boolean_t more_left = B_TRUE; | |
2750 | ||
2751 | ASSERT(list_is_empty(&sio_list)); | |
2752 | ||
2753 | /* loop while we still have sios left to process in this rs */ | |
2754 | while (more_left) { | |
2755 | scan_io_t *first_sio, *last_sio; | |
2756 | ||
2757 | /* | |
2758 | * We have selected which extent needs to be | |
2759 | * processed next. Gather up the corresponding sios. | |
2760 | */ | |
2761 | more_left = scan_io_queue_gather(queue, rs, &sio_list); | |
2762 | ASSERT(!list_is_empty(&sio_list)); | |
2763 | first_sio = list_head(&sio_list); | |
2764 | last_sio = list_tail(&sio_list); | |
2765 | ||
2766 | seg_end = last_sio->sio_offset + last_sio->sio_asize; | |
2767 | if (seg_start == 0) | |
2768 | seg_start = first_sio->sio_offset; | |
2769 | ||
2770 | /* | |
2771 | * Issuing sios can take a long time, so drop the | |
2772 | * queue lock. The sio queue won't be updated by | |
2773 | * other threads since we're in syncing context, so | |
2774 | * we can be sure that our trees will remain exactly | |
2775 | * as we left them. | |
2776 | */ | |
2777 | mutex_exit(q_lock); | |
2778 | suspended = scan_io_queue_issue(queue, &sio_list); | |
2779 | mutex_enter(q_lock); | |
2780 | ||
2781 | if (suspended) | |
2782 | break; | |
2783 | } | |
2784 | ||
2785 | /* update statistics for debugging purposes */ | |
2786 | scan_io_queues_update_seg_stats(queue, seg_start, seg_end); | |
2787 | ||
2788 | if (suspended) | |
2789 | break; | |
2790 | } | |
2791 | ||
2792 | /* | |
2793 | * If we were suspended in the middle of processing, | |
2794 | * requeue any unfinished sios and exit. | |
2795 | */ | |
2796 | while ((sio = list_head(&sio_list)) != NULL) { | |
2797 | list_remove(&sio_list, sio); | |
2798 | scan_io_queue_insert_impl(queue, sio); | |
2799 | } | |
2800 | ||
2801 | mutex_exit(q_lock); | |
2802 | list_destroy(&sio_list); | |
2803 | } | |
2804 | ||
2805 | /* | |
2806 | * Performs an emptying run on all scan queues in the pool. This just | |
2807 | * punches out one thread per top-level vdev, each of which processes | |
2808 | * only that vdev's scan queue. We can parallelize the I/O here because | |
13a2ff27 | 2809 | * we know that each queue's I/Os only affect its own top-level vdev. |
d4a72f23 TC |
2810 | * |
2811 | * This function waits for the queue runs to complete, and must be | |
2812 | * called from dsl_scan_sync (or in general, syncing context). | |
2813 | */ | |
2814 | static void | |
2815 | scan_io_queues_run(dsl_scan_t *scn) | |
2816 | { | |
2817 | spa_t *spa = scn->scn_dp->dp_spa; | |
2818 | ||
2819 | ASSERT(scn->scn_is_sorted); | |
2820 | ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); | |
2821 | ||
2822 | if (scn->scn_bytes_pending == 0) | |
2823 | return; | |
2824 | ||
2825 | if (scn->scn_taskq == NULL) { | |
2826 | int nthreads = spa->spa_root_vdev->vdev_children; | |
2827 | ||
2828 | /* | |
2829 | * We need to make this taskq *always* execute as many | |
2830 | * threads in parallel as we have top-level vdevs and no | |
2831 | * fewer; otherwise strange serialization of the calls to | |
2832 | * scan_io_queues_run_one can occur during spa_sync runs | |
2833 | * and that significantly impacts performance. | |
2834 | */ | |
2835 | scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads, | |
2836 | minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE); | |
2837 | } | |
2838 | ||
2839 | for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { | |
2840 | vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; | |
2841 | ||
2842 | mutex_enter(&vd->vdev_scan_io_queue_lock); | |
2843 | if (vd->vdev_scan_io_queue != NULL) { | |
2844 | VERIFY(taskq_dispatch(scn->scn_taskq, | |
2845 | scan_io_queues_run_one, vd->vdev_scan_io_queue, | |
2846 | TQ_SLEEP) != TASKQID_INVALID); | |
2847 | } | |
2848 | mutex_exit(&vd->vdev_scan_io_queue_lock); | |
2849 | } | |
2850 | ||
2851 | /* | |
13a2ff27 | 2852 | * Wait for the queues to finish issuing their IOs for this run |
d4a72f23 TC |
2853 | * before we return. There may still be IOs in flight at this |
2854 | * point. | |
2855 | */ | |
2856 | taskq_wait(scn->scn_taskq); | |
428870ff BB |
2857 | } |
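/*
 * Illustrative sketch (hypothetical, user-space; compiled out): the
 * dispatch-then-wait shape used above, with pthreads standing in for
 * the taskq. One worker per "top-level vdev" runs queue_runner(), and
 * the caller blocks until all of them finish, mirroring
 * taskq_dispatch() followed by taskq_wait().
 */
#if 0
#include <pthread.h>
#include <stdio.h>

#define	NVDEVS	4

static void *
queue_runner(void *arg)
{
	int vdev = *(int *)arg;
	printf("emptying scan queue for vdev %d\n", vdev);
	return (NULL);
}

int
main(void)
{
	pthread_t tids[NVDEVS];
	int ids[NVDEVS];

	for (int i = 0; i < NVDEVS; i++) {
		ids[i] = i;	/* like taskq_dispatch(), one per vdev */
		(void) pthread_create(&tids[i], NULL, queue_runner, &ids[i]);
	}
	for (int i = 0; i < NVDEVS; i++)	/* like taskq_wait() */
		(void) pthread_join(tids[i], NULL);
	return (0);
}
#endif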
2858 | ||
9ae529ec | 2859 | static boolean_t |
a1d477c2 | 2860 | dsl_scan_async_block_should_pause(dsl_scan_t *scn) |
428870ff | 2861 | { |
428870ff BB |
2862 | uint64_t elapsed_nanosecs; |
2863 | ||
78e2739d MA |
2864 | if (zfs_recover) |
2865 | return (B_FALSE); | |
2866 | ||
a1d477c2 | 2867 | if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks) |
36283ca2 MG |
2868 | return (B_TRUE); |
2869 | ||
428870ff | 2870 | elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; |
9ae529ec | 2871 | return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout || |
a1d477c2 | 2872 | (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms && |
428870ff | 2873 | txg_sync_waiting(scn->scn_dp)) || |
9ae529ec CS |
2874 | spa_shutting_down(scn->scn_dp->dp_spa)); |
2875 | } | |
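/*
 * Illustrative sketch (hypothetical, user-space; compiled out): the same
 * shape of deadline test as the pause predicate above, written against
 * CLOCK_MONOTONIC and omitting the zfs_recover and spa_shutting_down()
 * checks. max_blocks, min_time_ms and txg_timeout_s are stand-ins for
 * the tunables used above.
 */
#if 0
#include <time.h>
#include <stdbool.h>
#include <stdint.h>

static bool
should_pause(struct timespec start, uint64_t visited, uint64_t max_blocks,
    uint64_t min_time_ms, uint64_t txg_timeout_s, bool sync_waiting)
{
	struct timespec now;
	uint64_t elapsed_ns;

	if (visited >= max_blocks)
		return (true);

	(void) clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_ns = (uint64_t)(now.tv_sec - start.tv_sec) * 1000000000ULL +
	    (uint64_t)(now.tv_nsec - start.tv_nsec);
	return (elapsed_ns / 1000000000ULL > txg_timeout_s ||
	    (elapsed_ns / 1000000ULL > min_time_ms && sync_waiting));
}
#endif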
2876 | ||
2877 | static int | |
2878 | dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) | |
2879 | { | |
2880 | dsl_scan_t *scn = arg; | |
2881 | ||
2882 | if (!scn->scn_is_bptree || | |
2883 | (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) { | |
a1d477c2 | 2884 | if (dsl_scan_async_block_should_pause(scn)) |
2e528b49 | 2885 | return (SET_ERROR(ERESTART)); |
9ae529ec | 2886 | } |
428870ff BB |
2887 | |
2888 | zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa, | |
2889 | dmu_tx_get_txg(tx), bp, 0)); | |
2890 | dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, | |
2891 | -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp), | |
2892 | -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); | |
2893 | scn->scn_visited_this_txg++; | |
2894 | return (0); | |
2895 | } | |
2896 | ||
d4a72f23 TC |
2897 | static void |
2898 | dsl_scan_update_stats(dsl_scan_t *scn) | |
2899 | { | |
2900 | spa_t *spa = scn->scn_dp->dp_spa; | |
2901 | uint64_t i; | |
2902 | uint64_t seg_size_total = 0, zio_size_total = 0; | |
2903 | uint64_t seg_count_total = 0, zio_count_total = 0; | |
2904 | ||
2905 | for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { | |
2906 | vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; | |
2907 | dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue; | |
2908 | ||
2909 | if (queue == NULL) | |
2910 | continue; | |
2911 | ||
2912 | seg_size_total += queue->q_total_seg_size_this_txg; | |
2913 | zio_size_total += queue->q_total_zio_size_this_txg; | |
2914 | seg_count_total += queue->q_segs_this_txg; | |
2915 | zio_count_total += queue->q_zios_this_txg; | |
2916 | } | |
2917 | ||
2918 | if (seg_count_total == 0 || zio_count_total == 0) { | |
2919 | scn->scn_avg_seg_size_this_txg = 0; | |
2920 | scn->scn_avg_zio_size_this_txg = 0; | |
2921 | scn->scn_segs_this_txg = 0; | |
2922 | scn->scn_zios_this_txg = 0; | |
2923 | return; | |
2924 | } | |
2925 | ||
2926 | scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; | |
2927 | scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; | |
2928 | scn->scn_segs_this_txg = seg_count_total; | |
2929 | scn->scn_zios_this_txg = zio_count_total; | |
2930 | } | |
2931 | ||
a1d477c2 MA |
2932 | static int |
2933 | dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) | |
2934 | { | |
2935 | dsl_scan_t *scn = arg; | |
2936 | const dva_t *dva = &bp->blk_dva[0]; | |
2937 | ||
2938 | if (dsl_scan_async_block_should_pause(scn)) | |
2939 | return (SET_ERROR(ERESTART)); | |
2940 | ||
2941 | spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, | |
2942 | DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), | |
2943 | DVA_GET_ASIZE(dva), tx); | |
2944 | scn->scn_visited_this_txg++; | |
2945 | return (0); | |
2946 | } | |
2947 | ||
428870ff BB |
2948 | boolean_t |
2949 | dsl_scan_active(dsl_scan_t *scn) | |
2950 | { | |
2951 | spa_t *spa = scn->scn_dp->dp_spa; | |
2952 | uint64_t used = 0, comp, uncomp; | |
2953 | ||
2954 | if (spa->spa_load_state != SPA_LOAD_NONE) | |
2955 | return (B_FALSE); | |
2956 | if (spa_shutting_down(spa)) | |
2957 | return (B_FALSE); | |
d4a72f23 | 2958 | if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || |
fbeddd60 | 2959 | (scn->scn_async_destroying && !scn->scn_async_stalled)) |
428870ff BB |
2960 | return (B_TRUE); |
2961 | ||
2962 | if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { | |
2963 | (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, | |
2964 | &used, &comp, &uncomp); | |
2965 | } | |
2966 | return (used != 0); | |
2967 | } | |
2968 | ||
d4a72f23 TC |
2969 | static boolean_t |
2970 | dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, | |
2971 | uint64_t phys_birth) | |
2972 | { | |
2973 | vdev_t *vd; | |
2974 | ||
9e052db4 MA |
2975 | vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); |
2976 | ||
2977 | if (vd->vdev_ops == &vdev_indirect_ops) { | |
2978 | /* | |
2979 | * The indirect vdev can point to multiple | |
2980 | * vdevs. For simplicity, always create | |
2981 | * the resilver zio_t. zio_vdev_io_start() | |
2982 | * will bypass the child resilver i/o's if | |
2983 | * they are on vdevs that don't have DTL's. | |
2984 | */ | |
2985 | return (B_TRUE); | |
2986 | } | |
2987 | ||
d4a72f23 TC |
2988 | if (DVA_GET_GANG(dva)) { |
2989 | /* | |
2990 | * Gang members may be spread across multiple | |
2991 | * vdevs, so the best estimate we have is the | |
2992 | * scrub range, which has already been checked. | |
2993 | * XXX -- it would be better to change our | |
2994 | * allocation policy to ensure that all | |
2995 | * gang members reside on the same vdev. | |
2996 | */ | |
2997 | return (B_TRUE); | |
2998 | } | |
2999 | ||
d4a72f23 TC |
3000 | /* |
3001 | * Check if the txg falls within the range which must be | |
3002 | * resilvered. DVAs outside this range can always be skipped. | |
3003 | */ | |
3004 | if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1)) | |
3005 | return (B_FALSE); | |
3006 | ||
3007 | /* | |
3008 | * Check if the top-level vdev must resilver this offset. | |
3009 | * When the offset does not intersect with a dirty leaf DTL | |
3010 | * then it may be possible to skip the resilver IO. The psize | |
3011 | * is provided instead of asize to simplify the check for RAIDZ. | |
3012 | */ | |
3013 | if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize)) | |
3014 | return (B_FALSE); | |
3015 | ||
3016 | return (B_TRUE); | |
3017 | } | |
3018 | ||
d2734cce SD |
3019 | static int |
3020 | dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) | |
428870ff BB |
3021 | { |
3022 | dsl_scan_t *scn = dp->dp_scan; | |
3023 | spa_t *spa = dp->dp_spa; | |
d2734cce | 3024 | int err = 0; |
428870ff | 3025 | |
d2734cce SD |
3026 | if (spa_suspend_async_destroy(spa)) |
3027 | return (0); | |
428870ff | 3028 | |
ba5ad9a4 | 3029 | if (zfs_free_bpobj_enabled && |
d4a72f23 | 3030 | spa_version(spa) >= SPA_VERSION_DEADLISTS) { |
9ae529ec | 3031 | scn->scn_is_bptree = B_FALSE; |
a1d477c2 | 3032 | scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; |
d4a72f23 | 3033 | scn->scn_zio_root = zio_root(spa, NULL, |
428870ff BB |
3034 | NULL, ZIO_FLAG_MUSTSUCCEED); |
3035 | err = bpobj_iterate(&dp->dp_free_bpobj, | |
9ae529ec | 3036 | dsl_scan_free_block_cb, scn, tx); |
d4a72f23 TC |
3037 | VERIFY0(zio_wait(scn->scn_zio_root)); |
3038 | scn->scn_zio_root = NULL; | |
9ae529ec | 3039 | |
fbeddd60 MA |
3040 | if (err != 0 && err != ERESTART) |
3041 | zfs_panic_recover("error %u from bpobj_iterate()", err); | |
3042 | } | |
13fe0198 | 3043 | |
fbeddd60 MA |
3044 | if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { |
3045 | ASSERT(scn->scn_async_destroying); | |
3046 | scn->scn_is_bptree = B_TRUE; | |
d4a72f23 | 3047 | scn->scn_zio_root = zio_root(spa, NULL, |
fbeddd60 MA |
3048 | NULL, ZIO_FLAG_MUSTSUCCEED); |
3049 | err = bptree_iterate(dp->dp_meta_objset, | |
3050 | dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); | |
3051 | VERIFY0(zio_wait(scn->scn_zio_root)); | |
d4a72f23 | 3052 | scn->scn_zio_root = NULL; |
fbeddd60 MA |
3053 | |
3054 | if (err == EIO || err == ECKSUM) { | |
3055 | err = 0; | |
3056 | } else if (err != 0 && err != ERESTART) { | |
3057 | zfs_panic_recover("error %u from " | |
3058 | "traverse_dataset_destroyed()", err); | |
9ae529ec | 3059 | } |
fbeddd60 | 3060 | |
fbeddd60 MA |
3061 | if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { |
3062 | /* finished; deactivate async destroy feature */ | |
3063 | spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); | |
3064 | ASSERT(!spa_feature_is_active(spa, | |
3065 | SPA_FEATURE_ASYNC_DESTROY)); | |
3066 | VERIFY0(zap_remove(dp->dp_meta_objset, | |
3067 | DMU_POOL_DIRECTORY_OBJECT, | |
3068 | DMU_POOL_BPTREE_OBJ, tx)); | |
3069 | VERIFY0(bptree_free(dp->dp_meta_objset, | |
3070 | dp->dp_bptree_obj, tx)); | |
3071 | dp->dp_bptree_obj = 0; | |
3072 | scn->scn_async_destroying = B_FALSE; | |
905edb40 | 3073 | scn->scn_async_stalled = B_FALSE; |
89b1cd65 | 3074 | } else { |
3075 | /* | |
905edb40 MA |
3076 | * If we didn't make progress, mark the async |
3077 | * destroy as stalled, so that we will not initiate | |
3078 | * a spa_sync() on its behalf. Note that we only | |
3079 | * check this if we are not finished, because if the | |
3080 | * bptree had no blocks for us to visit, we can | |
3081 | * finish without "making progress". | |
89b1cd65 | 3082 | */ |
3083 | scn->scn_async_stalled = | |
3084 | (scn->scn_visited_this_txg == 0); | |
428870ff | 3085 | } |
fbeddd60 MA |
3086 | } |
3087 | if (scn->scn_visited_this_txg) { | |
3088 | zfs_dbgmsg("freed %llu blocks in %llums from " | |
3089 | "free_bpobj/bptree txg %llu; err=%u", | |
3090 | (longlong_t)scn->scn_visited_this_txg, | |
3091 | (longlong_t) | |
3092 | NSEC2MSEC(gethrtime() - scn->scn_sync_start_time), | |
3093 | (longlong_t)tx->tx_txg, err); | |
3094 | scn->scn_visited_this_txg = 0; | |
3095 | ||
3096 | /* | |
3097 | * Write out changes to the DDT that may be required as a | |
3098 | * result of the blocks freed. This ensures that the DDT | |
3099 | * is clean when a scrub/resilver runs. | |
3100 | */ | |
3101 | ddt_sync(spa, tx->tx_txg); | |
3102 | } | |
3103 | if (err != 0) | |
d2734cce | 3104 | return (err); |
7c9abfa7 GM |
3105 | if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && |
3106 | zfs_free_leak_on_eio && | |
d683ddbb JG |
3107 | (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 || |
3108 | dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 || | |
3109 | dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) { | |
fbeddd60 MA |
3110 | /* |
3111 | * We have finished background destroying, but there is still | |
3112 | * some space left in the dp_free_dir. Transfer this leaked | |
3113 | * space to the dp_leak_dir. | |
3114 | */ | |
3115 | if (dp->dp_leak_dir == NULL) { | |
3116 | rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); | |
3117 | (void) dsl_dir_create_sync(dp, dp->dp_root_dir, | |
3118 | LEAK_DIR_NAME, tx); | |
3119 | VERIFY0(dsl_pool_open_special_dir(dp, | |
3120 | LEAK_DIR_NAME, &dp->dp_leak_dir)); | |
3121 | rrw_exit(&dp->dp_config_rwlock, FTAG); | |
3122 | } | |
3123 | dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD, | |
d683ddbb JG |
3124 | dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, |
3125 | dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, | |
3126 | dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); | |
fbeddd60 | 3127 | dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD, |
d683ddbb JG |
3128 | -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, |
3129 | -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, | |
3130 | -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); | |
fbeddd60 | 3131 | } |
a1d477c2 | 3132 | |
7c9abfa7 | 3133 | if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) { |
9b67f605 | 3134 | /* finished; verify that space accounting went to zero */ |
d683ddbb JG |
3135 | ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes); |
3136 | ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes); | |
3137 | ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes); | |
428870ff BB |
3138 | } |
3139 | ||
a1d477c2 MA |
3140 | EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj), |
3141 | 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, | |
3142 | DMU_POOL_OBSOLETE_BPOBJ)); | |
3143 | if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) { | |
3144 | ASSERT(spa_feature_is_active(dp->dp_spa, | |
3145 | SPA_FEATURE_OBSOLETE_COUNTS)); | |
3146 | ||
3147 | scn->scn_is_bptree = B_FALSE; | |
3148 | scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms; | |
3149 | err = bpobj_iterate(&dp->dp_obsolete_bpobj, | |
3150 | dsl_scan_obsolete_block_cb, scn, tx); | |
3151 | if (err != 0 && err != ERESTART) | |
3152 | zfs_panic_recover("error %u from bpobj_iterate()", err); | |
3153 | ||
3154 | if (bpobj_is_empty(&dp->dp_obsolete_bpobj)) | |
3155 | dsl_pool_destroy_obsolete_bpobj(dp, tx); | |
3156 | } | |
d2734cce SD |
3157 | return (0); |
3158 | } | |
3159 | ||
3160 | /* | |
3161 | * This is the primary entry point for scans that is called from syncing | |
3162 | * context. Scans must happen entirely during syncing context so that we | |
3163 | * can guarantee that blocks we are currently scanning will not change out | |
3164 | * from under us. While a scan is active, this function controls how quickly | |
3165 | * transaction groups proceed, instead of the normal handling provided by | |
3166 | * txg_sync_thread(). | |
3167 | */ | |
3168 | void | |
3169 | dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) | |
3170 | { | |
3171 | int err = 0; | |
3172 | dsl_scan_t *scn = dp->dp_scan; | |
3173 | spa_t *spa = dp->dp_spa; | |
3174 | state_sync_type_t sync_type = SYNC_OPTIONAL; | |
3175 | ||
3176 | /* | |
3177 | * Check for scn_restart_txg before checking spa_load_state, so | |
3178 | * that we can restart an old-style scan while the pool is being | |
3179 | * imported (see dsl_scan_init). | |
3180 | */ | |
3181 | if (dsl_scan_restarting(scn, tx)) { | |
3182 | pool_scan_func_t func = POOL_SCAN_SCRUB; | |
3183 | dsl_scan_done(scn, B_FALSE, tx); | |
3184 | if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) | |
3185 | func = POOL_SCAN_RESILVER; | |
3186 | zfs_dbgmsg("restarting scan func=%u txg=%llu", | |
3187 | func, (longlong_t)tx->tx_txg); | |
3188 | dsl_scan_setup_sync(&func, tx); | |
3189 | } | |
3190 | ||
3191 | /* | |
3192 | * Only process scans in sync pass 1. | |
3193 | */ | |
3194 | if (spa_sync_pass(spa) > 1) | |
3195 | return; | |
3196 | ||
3197 | /* | |
3198 | * If the spa is shutting down, then stop scanning. This will | |
3199 | * ensure that the scan does not dirty any new data during the | |
3200 | * shutdown phase. | |
3201 | */ | |
3202 | if (spa_shutting_down(spa)) | |
3203 | return; | |
3204 | ||
3205 | /* | |
3206 | * If the scan is inactive due to a stalled async destroy, try again. | |
3207 | */ | |
3208 | if (!scn->scn_async_stalled && !dsl_scan_active(scn)) | |
3209 | return; | |
3210 | ||
3211 | /* reset scan statistics */ | |
3212 | scn->scn_visited_this_txg = 0; | |
3213 | scn->scn_holes_this_txg = 0; | |
3214 | scn->scn_lt_min_this_txg = 0; | |
3215 | scn->scn_gt_max_this_txg = 0; | |
3216 | scn->scn_ddt_contained_this_txg = 0; | |
3217 | scn->scn_objsets_visited_this_txg = 0; | |
3218 | scn->scn_avg_seg_size_this_txg = 0; | |
3219 | scn->scn_segs_this_txg = 0; | |
3220 | scn->scn_avg_zio_size_this_txg = 0; | |
3221 | scn->scn_zios_this_txg = 0; | |
3222 | scn->scn_suspending = B_FALSE; | |
3223 | scn->scn_sync_start_time = gethrtime(); | |
3224 | spa->spa_scrub_active = B_TRUE; | |
3225 | ||
3226 | /* | |
3227 | * First process the async destroys. If we suspend, don't do | |
3228 | * any scrubbing or resilvering. This ensures that there are no | |
3229 | * async destroys while we are scanning, so the scan code doesn't | |
3230 | * have to worry about traversing it. It is also faster to free the | |
3231 | * blocks than to scrub them. | |
3232 | */ | |
3233 | err = dsl_process_async_destroys(dp, tx); | |
3234 | if (err != 0) | |
3235 | return; | |
a1d477c2 | 3236 | |
d4a72f23 | 3237 | if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn)) |
428870ff BB |
3238 | return; |
3239 | ||
d4a72f23 TC |
3240 | /* |
3241 | * Wait a few txgs after importing to begin scanning so that | |
3242 | * we can get the pool imported quickly. | |
3243 | */ | |
3244 | if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS) | |
5d1f7fb6 | 3245 | return; |
5d1f7fb6 | 3246 | |
d4a72f23 TC |
3247 | /* |
3248 | * It is possible to switch from unsorted to sorted at any time, | |
3249 | * but afterwards the scan will remain sorted unless reloaded from | |
3250 | * a checkpoint after a reboot. | |
3251 | */ | |
3252 | if (!zfs_scan_legacy) { | |
3253 | scn->scn_is_sorted = B_TRUE; | |
3254 | if (scn->scn_last_checkpoint == 0) | |
3255 | scn->scn_last_checkpoint = ddi_get_lbolt(); | |
3256 | } | |
0ea05c64 | 3257 | |
d4a72f23 TC |
3258 | /* |
3259 | * For sorted scans, determine what kind of work we will be doing | |
3260 | * this txg based on our memory limitations and whether or not we | |
3261 | * need to perform a checkpoint. | |
3262 | */ | |
3263 | if (scn->scn_is_sorted) { | |
3264 | /* | |
3265 | * If we are over our checkpoint interval, set scn_clearing | |
3266 | * so that we can begin checkpointing immediately. The | |
13a2ff27 | 3267 | * checkpoint allows us to save a consistent bookmark |
d4a72f23 TC |
3268 | * representing how much data we have scrubbed so far. |
3269 | * Otherwise, use the memory limit to determine if we should | |
3270 | * scan for metadata or start issuing scrub IOs. We accumulate | |
3271 | * metadata until we hit our hard memory limit, at which point | |
3272 | * we issue scrub IOs until we are at our soft memory limit. | |
3273 | */ | |
3274 | if (scn->scn_checkpointing || | |
3275 | ddi_get_lbolt() - scn->scn_last_checkpoint > | |
3276 | SEC_TO_TICK(zfs_scan_checkpoint_intval)) { | |
3277 | if (!scn->scn_checkpointing) | |
3278 | zfs_dbgmsg("begin scan checkpoint"); | |
3279 | ||
3280 | scn->scn_checkpointing = B_TRUE; | |
3281 | scn->scn_clearing = B_TRUE; | |
3282 | } else { | |
3283 | boolean_t should_clear = dsl_scan_should_clear(scn); | |
3284 | if (should_clear && !scn->scn_clearing) { | |
3285 | zfs_dbgmsg("begin scan clearing"); | |
3286 | scn->scn_clearing = B_TRUE; | |
3287 | } else if (!should_clear && scn->scn_clearing) { | |
3288 | zfs_dbgmsg("finish scan clearing"); | |
3289 | scn->scn_clearing = B_FALSE; | |
3290 | } | |
3291 | } | |
428870ff | 3292 | } else { |
d4a72f23 TC |
3293 | ASSERT0(scn->scn_checkpointing); |
3294 | ASSERT0(scn->scn_clearing); | |
428870ff BB |
3295 | } |
3296 | ||
d4a72f23 TC |
3297 | if (!scn->scn_clearing && scn->scn_done_txg == 0) { |
3298 | /* Need to scan metadata for more blocks to scrub */ | |
3299 | dsl_scan_phys_t *scnp = &scn->scn_phys; | |
3300 | taskqid_t prefetch_tqid; | |
3301 | uint64_t bytes_per_leaf = zfs_scan_vdev_limit; | |
3302 | uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev); | |
428870ff | 3303 | |
d4a72f23 | 3304 | /* |
f90a30ad | 3305 | * Recalculate the max number of in-flight bytes for pool-wide |
d4a72f23 TC |
3306 | * scanning operations (minimum 1MB). Limits for the issuing |
3307 | * phase are done per top-level vdev and are handled separately. | |
3308 | */ | |
3309 | scn->scn_maxinflight_bytes = | |
3310 | MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); | |
3311 | ||
3312 | if (scnp->scn_ddt_bookmark.ddb_class <= | |
3313 | scnp->scn_ddt_class_max) { | |
3314 | ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); | |
3315 | zfs_dbgmsg("doing scan sync txg %llu; " | |
3316 | "ddt bm=%llu/%llu/%llu/%llx", | |
3317 | (longlong_t)tx->tx_txg, | |
3318 | (longlong_t)scnp->scn_ddt_bookmark.ddb_class, | |
3319 | (longlong_t)scnp->scn_ddt_bookmark.ddb_type, | |
3320 | (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, | |
3321 | (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); | |
3322 | } else { | |
3323 | zfs_dbgmsg("doing scan sync txg %llu; " | |
3324 | "bm=%llu/%llu/%llu/%llu", | |
3325 | (longlong_t)tx->tx_txg, | |
3326 | (longlong_t)scnp->scn_bookmark.zb_objset, | |
3327 | (longlong_t)scnp->scn_bookmark.zb_object, | |
3328 | (longlong_t)scnp->scn_bookmark.zb_level, | |
3329 | (longlong_t)scnp->scn_bookmark.zb_blkid); | |
3330 | } | |
428870ff | 3331 | |
d4a72f23 TC |
3332 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, |
3333 | NULL, ZIO_FLAG_CANFAIL); | |
428870ff | 3334 | |
d4a72f23 TC |
3335 | scn->scn_prefetch_stop = B_FALSE; |
3336 | prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, | |
3337 | dsl_scan_prefetch_thread, scn, TQ_SLEEP); | |
3338 | ASSERT(prefetch_tqid != TASKQID_INVALID); | |
428870ff | 3339 | |
d4a72f23 TC |
3340 | dsl_pool_config_enter(dp, FTAG); |
3341 | dsl_scan_visit(scn, tx); | |
3342 | dsl_pool_config_exit(dp, FTAG); | |
428870ff | 3343 | |
d4a72f23 TC |
3344 | mutex_enter(&dp->dp_spa->spa_scrub_lock); |
3345 | scn->scn_prefetch_stop = B_TRUE; | |
3346 | cv_broadcast(&spa->spa_scrub_io_cv); | |
3347 | mutex_exit(&dp->dp_spa->spa_scrub_lock); | |
428870ff | 3348 | |
d4a72f23 TC |
3349 | taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); |
3350 | (void) zio_wait(scn->scn_zio_root); | |
3351 | scn->scn_zio_root = NULL; | |
3352 | ||
3353 | zfs_dbgmsg("scan visited %llu blocks in %llums " | |
3354 | "(%llu os's, %llu holes, %llu < mintxg, " | |
3355 | "%llu in ddt, %llu > maxtxg)", | |
3356 | (longlong_t)scn->scn_visited_this_txg, | |
3357 | (longlong_t)NSEC2MSEC(gethrtime() - | |
3358 | scn->scn_sync_start_time), | |
3359 | (longlong_t)scn->scn_objsets_visited_this_txg, | |
3360 | (longlong_t)scn->scn_holes_this_txg, | |
3361 | (longlong_t)scn->scn_lt_min_this_txg, | |
3362 | (longlong_t)scn->scn_ddt_contained_this_txg, | |
3363 | (longlong_t)scn->scn_gt_max_this_txg); | |
3364 | ||
3365 | if (!scn->scn_suspending) { | |
3366 | ASSERT0(avl_numnodes(&scn->scn_queue)); | |
3367 | scn->scn_done_txg = tx->tx_txg + 1; | |
3368 | if (scn->scn_is_sorted) { | |
3369 | scn->scn_checkpointing = B_TRUE; | |
3370 | scn->scn_clearing = B_TRUE; | |
3371 | } | |
3372 | zfs_dbgmsg("scan complete txg %llu", | |
3373 | (longlong_t)tx->tx_txg); | |
3374 | } | |
3375 | } else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) { | |
3376 | /* need to issue scrubbing IOs from per-vdev queues */ | |
3377 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, | |
3378 | NULL, ZIO_FLAG_CANFAIL); | |
3379 | scan_io_queues_run(scn); | |
3380 | (void) zio_wait(scn->scn_zio_root); | |
3381 | scn->scn_zio_root = NULL; | |
3382 | ||
3383 | /* calculate and dprintf the current memory usage */ | |
3384 | (void) dsl_scan_should_clear(scn); | |
3385 | dsl_scan_update_stats(scn); | |
3386 | ||
3387 | zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums " | |
3388 | "(avg_block_size = %llu, avg_seg_size = %llu)", | |
3389 | (longlong_t)scn->scn_zios_this_txg, | |
3390 | (longlong_t)scn->scn_segs_this_txg, | |
3391 | (longlong_t)NSEC2MSEC(gethrtime() - | |
3392 | scn->scn_sync_start_time), | |
3393 | (longlong_t)scn->scn_avg_zio_size_this_txg, | |
3394 | (longlong_t)scn->scn_avg_seg_size_this_txg); | |
3395 | } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) { | |
3396 | /* Finished with everything. Mark the scrub as complete */ | |
3397 | zfs_dbgmsg("scan issuing complete txg %llu", | |
3398 | (longlong_t)tx->tx_txg); | |
3399 | ASSERT3U(scn->scn_done_txg, !=, 0); | |
3400 | ASSERT0(spa->spa_scrub_inflight); | |
3401 | ASSERT0(scn->scn_bytes_pending); | |
3402 | dsl_scan_done(scn, B_TRUE, tx); | |
3403 | sync_type = SYNC_MANDATORY; | |
428870ff | 3404 | } |
428870ff | 3405 | |
d4a72f23 | 3406 | dsl_scan_sync_state(scn, tx, sync_type); |
428870ff BB |
3407 | } |
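/*
 * Illustrative sketch (hypothetical; compiled out): the sorted-scan
 * state decision from dsl_scan_sync() above, reduced to a pure
 * function. over_interval and over_mem_limit are stand-ins for the
 * lbolt/checkpoint-interval comparison and dsl_scan_should_clear().
 */
#if 0
#include <stdbool.h>

typedef struct {
	bool checkpointing;
	bool clearing;
} scan_state_t;

static void
update_scan_state(scan_state_t *s, bool over_interval, bool over_mem_limit)
{
	if (s->checkpointing || over_interval) {
		/* checkpoint: also drain (clear) what we have queued */
		s->checkpointing = true;
		s->clearing = true;
	} else {
		/* toggle between gathering metadata and issuing I/Os */
		s->clearing = over_mem_limit;
	}
}
#endif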
3408 | ||
428870ff | 3409 | static void |
d4a72f23 | 3410 | count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp) |
428870ff BB |
3411 | { |
3412 | int i; | |
3413 | ||
d4a72f23 TC |
3414 | /* update the spa's stats on how many bytes we have issued */ |
3415 | for (i = 0; i < BP_GET_NDVAS(bp); i++) { | |
3416 | atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued, | |
3417 | DVA_GET_ASIZE(&bp->blk_dva[i])); | |
3418 | } | |
3419 | ||
428870ff BB |
3420 | /* |
3421 | * If we resume after a reboot, zab will be NULL; don't record | |
3422 | * incomplete stats in that case. | |
3423 | */ | |
3424 | if (zab == NULL) | |
3425 | return; | |
3426 | ||
d4a72f23 TC |
3427 | mutex_enter(&zab->zab_lock); |
3428 | ||
428870ff BB |
3429 | for (i = 0; i < 4; i++) { |
3430 | int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; | |
3431 | int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; | |
9ae529ec CS |
3432 | |
3433 | if (t & DMU_OT_NEWTYPE) | |
3434 | t = DMU_OT_OTHER; | |
1c27024e DB |
3435 | zfs_blkstat_t *zb = &zab->zab_type[l][t]; |
3436 | int equal; | |
428870ff BB |
3437 | |
3438 | zb->zb_count++; | |
3439 | zb->zb_asize += BP_GET_ASIZE(bp); | |
3440 | zb->zb_lsize += BP_GET_LSIZE(bp); | |
3441 | zb->zb_psize += BP_GET_PSIZE(bp); | |
3442 | zb->zb_gangs += BP_COUNT_GANG(bp); | |
3443 | ||
3444 | switch (BP_GET_NDVAS(bp)) { | |
3445 | case 2: | |
3446 | if (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
3447 | DVA_GET_VDEV(&bp->blk_dva[1])) | |
3448 | zb->zb_ditto_2_of_2_samevdev++; | |
3449 | break; | |
3450 | case 3: | |
3451 | equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
3452 | DVA_GET_VDEV(&bp->blk_dva[1])) + | |
3453 | (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
3454 | DVA_GET_VDEV(&bp->blk_dva[2])) + | |
3455 | (DVA_GET_VDEV(&bp->blk_dva[1]) == | |
3456 | DVA_GET_VDEV(&bp->blk_dva[2])); | |
3457 | if (equal == 1) | |
3458 | zb->zb_ditto_2_of_3_samevdev++; | |
3459 | else if (equal == 3) | |
3460 | zb->zb_ditto_3_of_3_samevdev++; | |
3461 | break; | |
3462 | } | |
3463 | } | |
d4a72f23 TC |
3464 | |
3465 | mutex_exit(&zab->zab_lock); | |
428870ff BB |
3466 | } |
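/*
 * Illustrative sketch (hypothetical; compiled out): the 3-DVA "equal"
 * computation above. Summing the pairwise vdev matches can only yield
 * 0, 1 or 3 (a sum of 2 would imply a contradiction), which is why only
 * the equal == 1 and equal == 3 cases are counted as ditto blocks.
 */
#if 0
#include <stdint.h>

static int
ditto_pairs(uint64_t v0, uint64_t v1, uint64_t v2)
{
	return ((v0 == v1) + (v0 == v2) + (v1 == v2));
}
/* ditto_pairs(1, 1, 2) == 1; ditto_pairs(7, 7, 7) == 3 */
#endif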
3467 | ||
3468 | static void | |
d4a72f23 | 3469 | scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) |
428870ff | 3470 | { |
d4a72f23 TC |
3471 | avl_index_t idx; |
3472 | int64_t asize = sio->sio_asize; | |
3473 | dsl_scan_t *scn = queue->q_scn; | |
428870ff | 3474 | |
d4a72f23 | 3475 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); |
428870ff | 3476 | |
d4a72f23 TC |
3477 | if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { |
3478 | /* block is already scheduled for reading */ | |
3479 | atomic_add_64(&scn->scn_bytes_pending, -asize); | |
3480 | kmem_cache_free(sio_cache, sio); | |
3481 | return; | |
428870ff | 3482 | } |
d4a72f23 TC |
3483 | avl_insert(&queue->q_sios_by_addr, sio, idx); |
3484 | range_tree_add(queue->q_exts_by_addr, sio->sio_offset, asize); | |
428870ff BB |
3485 | } |
3486 | ||
d4a72f23 TC |
3487 | /* |
3488 | * Given all the info we got from our metadata scanning process, we | |
3489 | * construct a scan_io_t and insert it into the scan sorting queue. The | |
3490 | * I/O must already be suitable for us to process. This is controlled | |
3491 | * by dsl_scan_enqueue(). | |
3492 | */ | |
3493 | static void | |
3494 | scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, | |
3495 | int zio_flags, const zbookmark_phys_t *zb) | |
3d6da72d | 3496 | { |
d4a72f23 TC |
3497 | dsl_scan_t *scn = queue->q_scn; |
3498 | scan_io_t *sio = kmem_cache_alloc(sio_cache, KM_SLEEP); | |
3d6da72d | 3499 | |
d4a72f23 TC |
3500 | ASSERT0(BP_IS_GANG(bp)); |
3501 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
3d6da72d | 3502 | |
d4a72f23 TC |
3503 | bp2sio(bp, sio, dva_i); |
3504 | sio->sio_flags = zio_flags; | |
3505 | sio->sio_zb = *zb; | |
3d6da72d IH |
3506 | |
3507 | /* | |
d4a72f23 TC |
3508 | * Increment the bytes pending counter now so that we can't |
3509 | * get an integer underflow in case the worker processes the | |
3510 | * zio before we get to incrementing this counter. | |
3d6da72d | 3511 | */ |
d4a72f23 TC |
3512 | atomic_add_64(&scn->scn_bytes_pending, sio->sio_asize); |
3513 | ||
3514 | scan_io_queue_insert_impl(queue, sio); | |
3515 | } | |
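/*
 * Illustrative sketch (hypothetical, user-space; compiled out): why the
 * pending-bytes counter above is bumped before the sio is published.
 * With C11 atomics standing in for atomic_add_64(), the consumer only
 * ever subtracts what a producer has already added, so the counter
 * cannot transiently underflow.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t bytes_pending;

static void
producer_enqueue(int64_t asize)
{
	/* add first... */
	atomic_fetch_add(&bytes_pending, asize);
	/* ...then make the I/O visible to the worker (queue insert). */
}

static void
worker_issue(int64_t asize)
{
	/* runs only after the producer published the I/O */
	atomic_fetch_sub(&bytes_pending, asize);
}
#endif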
3516 | ||
3517 | /* | |
3518 | * Given a set of I/O parameters as discovered by the metadata traversal | |
3519 | * process, attempts to place the I/O into the sorted queues (if allowed), | |
3520 | * or immediately executes the I/O. | |
3521 | */ | |
3522 | static void | |
3523 | dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, | |
3524 | const zbookmark_phys_t *zb) | |
3525 | { | |
3526 | spa_t *spa = dp->dp_spa; | |
3527 | ||
3528 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
3d6da72d IH |
3529 | |
3530 | /* | |
d4a72f23 TC |
3531 | * Gang blocks are hard to issue sequentially, so we just issue them |
3532 | * here immediately instead of queuing them. | |
3d6da72d | 3533 | */ |
d4a72f23 TC |
3534 | if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { |
3535 | scan_exec_io(dp, bp, zio_flags, zb, NULL); | |
3536 | return; | |
3537 | } | |
3d6da72d | 3538 | |
d4a72f23 TC |
3539 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
3540 | dva_t dva; | |
3541 | vdev_t *vdev; | |
3542 | ||
3543 | dva = bp->blk_dva[i]; | |
3544 | vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); | |
3545 | ASSERT(vdev != NULL); | |
3546 | ||
3547 | mutex_enter(&vdev->vdev_scan_io_queue_lock); | |
3548 | if (vdev->vdev_scan_io_queue == NULL) | |
3549 | vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); | |
3550 | ASSERT(dp->dp_scan != NULL); | |
3551 | scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, | |
3552 | i, zio_flags, zb); | |
3553 | mutex_exit(&vdev->vdev_scan_io_queue_lock); | |
3554 | } | |
3d6da72d IH |
3555 | } |
3556 | ||
428870ff BB |
3557 | static int |
3558 | dsl_scan_scrub_cb(dsl_pool_t *dp, | |
5dbd68a3 | 3559 | const blkptr_t *bp, const zbookmark_phys_t *zb) |
428870ff BB |
3560 | { |
3561 | dsl_scan_t *scn = dp->dp_scan; | |
428870ff BB |
3562 | spa_t *spa = dp->dp_spa; |
3563 | uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); | |
d4a72f23 | 3564 | size_t psize = BP_GET_PSIZE(bp); |
d6320ddb | 3565 | boolean_t needs_io = B_FALSE; |
572e2857 | 3566 | int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; |
428870ff | 3567 | |
00c405b4 | 3568 | |
428870ff | 3569 | if (phys_birth <= scn->scn_phys.scn_min_txg || |
863522b1 SN |
3570 | phys_birth >= scn->scn_phys.scn_max_txg) { |
3571 | count_block(scn, dp->dp_blkstats, bp); | |
428870ff | 3572 | return (0); |
863522b1 | 3573 | } |
428870ff | 3574 | |
00c405b4 MA |
3575 | /* Embedded BP's have phys_birth==0, so we reject them above. */ |
3576 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
9b67f605 | 3577 | |
428870ff BB |
3578 | ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); |
3579 | if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { | |
3580 | zio_flags |= ZIO_FLAG_SCRUB; | |
428870ff | 3581 | needs_io = B_TRUE; |
a117a6d6 GW |
3582 | } else { |
3583 | ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); | |
428870ff | 3584 | zio_flags |= ZIO_FLAG_RESILVER; |
428870ff BB |
3585 | needs_io = B_FALSE; |
3586 | } | |
3587 | ||
3588 | /* If it's an intent log block, failure is expected. */ | |
3589 | if (zb->zb_level == ZB_ZIL_LEVEL) | |
3590 | zio_flags |= ZIO_FLAG_SPECULATIVE; | |
3591 | ||
1c27024e | 3592 | for (int d = 0; d < BP_GET_NDVAS(bp); d++) { |
3d6da72d | 3593 | const dva_t *dva = &bp->blk_dva[d]; |
428870ff BB |
3594 | |
3595 | /* | |
3596 | * Keep track of how much data we've examined so that | |
3597 | * zpool(1M) status can make useful progress reports. | |
3598 | */ | |
3d6da72d IH |
3599 | scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva); |
3600 | spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva); | |
428870ff BB |
3601 | |
3602 | /* if it's a resilver, this may not be in the target range */ | |
3d6da72d IH |
3603 | if (!needs_io) |
3604 | needs_io = dsl_scan_need_resilver(spa, dva, psize, | |
3605 | phys_birth); | |
428870ff BB |
3606 | } |
3607 | ||
3608 | if (needs_io && !zfs_no_scrub_io) { | |
d4a72f23 TC |
3609 | dsl_scan_enqueue(dp, bp, zio_flags, zb); |
3610 | } else { | |
3611 | count_block(scn, dp->dp_blkstats, bp); | |
3612 | } | |
3613 | ||
3614 | /* do not relocate this block */ | |
3615 | return (0); | |
3616 | } | |
3617 | ||
3618 | static void | |
3619 | dsl_scan_scrub_done(zio_t *zio) | |
3620 | { | |
3621 | spa_t *spa = zio->io_spa; | |
3622 | blkptr_t *bp = zio->io_bp; | |
3623 | dsl_scan_io_queue_t *queue = zio->io_private; | |
3624 | ||
3625 | abd_free(zio->io_abd); | |
3626 | ||
3627 | if (queue == NULL) { | |
3628 | mutex_enter(&spa->spa_scrub_lock); | |
3629 | ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); | |
3630 | spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); | |
3631 | cv_broadcast(&spa->spa_scrub_io_cv); | |
3632 | mutex_exit(&spa->spa_scrub_lock); | |
3633 | } else { | |
3634 | mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); | |
3635 | ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); | |
3636 | queue->q_inflight_bytes -= BP_GET_PSIZE(bp); | |
3637 | cv_broadcast(&queue->q_zio_cv); | |
3638 | mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); | |
3639 | } | |
3640 | ||
3641 | if (zio->io_error && (zio->io_error != ECKSUM || | |
3642 | !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { | |
3643 | atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors); | |
3644 | } | |
3645 | } | |
428870ff | 3646 | |
d4a72f23 TC |
3647 | /* |
3648 | * Given a scanning zio's information, executes the zio. The zio need | |
3649 | * not be sortable; this function simply executes the zio it is given, | |
3650 | * whatever it is. The optional queue argument allows the | |
3651 | * caller to specify that they want per top level vdev IO rate limiting | |
3652 | * instead of the legacy global limiting. | |
3653 | */ | |
3654 | static void | |
3655 | scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, | |
3656 | const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue) | |
3657 | { | |
3658 | spa_t *spa = dp->dp_spa; | |
3659 | dsl_scan_t *scn = dp->dp_scan; | |
3660 | size_t size = BP_GET_PSIZE(bp); | |
3661 | abd_t *data = abd_alloc_for_io(size, B_FALSE); | |
3662 | ||
f90a30ad BB |
3663 | ASSERT3U(scn->scn_maxinflight_bytes, >, 0); |
3664 | ||
d4a72f23 | 3665 | if (queue == NULL) { |
428870ff | 3666 | mutex_enter(&spa->spa_scrub_lock); |
d4a72f23 | 3667 | while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes) |
428870ff | 3668 | cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); |
d4a72f23 | 3669 | spa->spa_scrub_inflight += BP_GET_PSIZE(bp); |
428870ff | 3670 | mutex_exit(&spa->spa_scrub_lock); |
d4a72f23 TC |
3671 | } else { |
3672 | kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; | |
428870ff | 3673 | |
d4a72f23 TC |
3674 | mutex_enter(q_lock); |
3675 | while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes) | |
3676 | cv_wait(&queue->q_zio_cv, q_lock); | |
3677 | queue->q_inflight_bytes += BP_GET_PSIZE(bp); | |
3678 | mutex_exit(q_lock); | |
3679 | } | |
3680 | ||
3681 | count_block(scn, dp->dp_blkstats, bp); | |
3682 | zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size, | |
3683 | dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); | |
3684 | } | |
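/*
 * Illustrative sketch (hypothetical, user-space; compiled out): the
 * in-flight byte throttle used above, with pthread primitives standing
 * in for cv_wait()/cv_broadcast(). throttle_acquire() blocks while the
 * limit is exceeded, like the loops above; throttle_release() is what
 * the I/O completion path (dsl_scan_scrub_done()) would call.
 */
#if 0
#include <pthread.h>
#include <stdint.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	uint64_t inflight;
	uint64_t max_inflight;
} throttle_t;

static void
throttle_acquire(throttle_t *t, uint64_t bytes)
{
	(void) pthread_mutex_lock(&t->lock);
	while (t->inflight >= t->max_inflight)
		(void) pthread_cond_wait(&t->cv, &t->lock);
	t->inflight += bytes;
	(void) pthread_mutex_unlock(&t->lock);
}

static void
throttle_release(throttle_t *t, uint64_t bytes)
{
	(void) pthread_mutex_lock(&t->lock);
	t->inflight -= bytes;
	(void) pthread_cond_broadcast(&t->cv);
	(void) pthread_mutex_unlock(&t->lock);
}
#endif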
572e2857 | 3685 | |
d4a72f23 TC |
3686 | /* |
3687 | * This is the primary extent sorting algorithm. We balance two parameters: | |
3688 | * 1) how many bytes of I/O are in an extent | |
3689 | * 2) how well the extent is filled with I/O (as a fraction of its total size) | |
3690 | * Since we allow extents to have gaps between their constituent I/Os, it's | |
3691 | * possible to have a fairly large extent that contains the same amount of | |
3692 | * I/O bytes as a much smaller extent, which just packs the I/O more tightly. | |
3693 | * The algorithm sorts based on a score calculated from the extent's size, | |
3694 | * the relative fill volume (in %) and a "fill weight" parameter that controls | |
3695 | * the split between whether we prefer larger extents or more well populated | |
3696 | * extents: | |
3697 | * | |
3698 | * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) | |
3699 | * | |
3700 | * Example: | |
3701 | * 1) assume extsz = 64 MiB | |
3702 | * 2) assume fill = 32 MiB (extent is half full) | |
3703 | * 3) assume fill_weight = 3 | |
3704 | * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100 | |
3705 | * SCORE = 32M + (50 * 3 * 32M) / 100 | |
3706 | * SCORE = 32M + (4800M / 100) | |
3707 | * SCORE = 32M + 48M | |
3708 | * ^ ^ | |
3709 | * | +--- final total relative fill-based score | |
3710 | * +--------- final total fill-based score | |
3711 | * SCORE = 80M | |
3712 | * | |
3713 | * As can be seen, at fill_weight=3, the algorithm is slightly biased towards | |
3714 | * extents that are more completely filled (in a 3:2 ratio) vs just larger. | |
3715 | * Note that as an optimization, we replace multiplication and division by | |
3716 | * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). | |
3717 | */ | |
3718 | static int | |
3719 | ext_size_compare(const void *x, const void *y) | |
3720 | { | |
3721 | const range_seg_t *rsa = x, *rsb = y; | |
3722 | uint64_t sa = rsa->rs_end - rsa->rs_start, | |
3723 | sb = rsb->rs_end - rsb->rs_start; | |
3724 | uint64_t score_a, score_b; | |
3725 | ||
3726 | score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) * | |
3727 | fill_weight * rsa->rs_fill) >> 7); | |
3728 | score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) * | |
3729 | fill_weight * rsb->rs_fill) >> 7); | |
3730 | ||
3731 | if (score_a > score_b) | |
3732 | return (-1); | |
3733 | if (score_a == score_b) { | |
3734 | if (rsa->rs_start < rsb->rs_start) | |
3735 | return (-1); | |
3736 | if (rsa->rs_start == rsb->rs_start) | |
3737 | return (0); | |
3738 | return (1); | |
428870ff | 3739 | } |
d4a72f23 TC |
3740 | return (1); |
3741 | } | |
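/*
 * Illustrative sketch (hypothetical, user-space; compiled out):
 * evaluates the scoring formula from the comment above, including the
 * shift-by-7 optimization, and reproduces the 64 MiB extent / 32 MiB
 * fill / fill_weight=3 worked example (a score of 80 MiB).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
ext_score(uint64_t size, uint64_t fill, uint64_t fill_weight)
{
	/* SCORE = fill + (fill_ratio * fill_weight * fill), in 1/128ths */
	return (fill + ((((fill << 7) / size) * fill_weight * fill) >> 7));
}

int
main(void)
{
	uint64_t mib = 1024ULL * 1024;

	/* prints "score = 80 MiB": 32M base + 48M relative-fill bonus */
	printf("score = %llu MiB\n",
	    (unsigned long long)(ext_score(64 * mib, 32 * mib, 3) / mib));
	return (0);
}
#endif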
428870ff | 3742 | |
d4a72f23 TC |
3743 | /* |
3744 | * Comparator for the q_sios_by_addr tree. Sorting is simply performed | |
3745 | * based on LBA-order (from lowest to highest). | |
3746 | */ | |
3747 | static int | |
3748 | sio_addr_compare(const void *x, const void *y) | |
3749 | { | |
3750 | const scan_io_t *a = x, *b = y; | |
3751 | ||
3752 | if (a->sio_offset < b->sio_offset) | |
3753 | return (-1); | |
3754 | if (a->sio_offset == b->sio_offset) | |
3755 | return (0); | |
3756 | return (1); | |
3757 | } | |
3758 | ||
3759 | /* IO queues are created on demand when they are needed. */ | |
3760 | static dsl_scan_io_queue_t * | |
3761 | scan_io_queue_create(vdev_t *vd) | |
3762 | { | |
3763 | dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; | |
3764 | dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP); | |
3765 | ||
3766 | q->q_scn = scn; | |
3767 | q->q_vd = vd; | |
3768 | cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); | |
3769 | q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops, | |
a1d477c2 | 3770 | &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap); |
d4a72f23 TC |
3771 | avl_create(&q->q_sios_by_addr, sio_addr_compare, |
3772 | sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); | |
3773 | ||
3774 | return (q); | |
428870ff BB |
3775 | } |
3776 | ||
0ea05c64 | 3777 | /* |
d4a72f23 TC |
3778 | * Destroys a scan queue and all segments and scan_io_t's contained in it. |
3779 | * No further execution of I/O occurs, anything pending in the queue is | |
3780 | * simply freed without being executed. | |
0ea05c64 | 3781 | */ |
d4a72f23 TC |
3782 | void |
3783 | dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue) | |
428870ff | 3784 | { |
d4a72f23 TC |
3785 | dsl_scan_t *scn = queue->q_scn; |
3786 | scan_io_t *sio; | |
3787 | void *cookie = NULL; | |
3788 | int64_t bytes_dequeued = 0; | |
3789 | ||
3790 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
3791 | ||
3792 | while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != | |
3793 | NULL) { | |
3794 | ASSERT(range_tree_contains(queue->q_exts_by_addr, | |
3795 | sio->sio_offset, sio->sio_asize)); | |
3796 | bytes_dequeued += sio->sio_asize; | |
3797 | kmem_cache_free(sio_cache, sio); | |
3798 | } | |
428870ff | 3799 | |
d4a72f23 TC |
3800 | atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued); |
3801 | range_tree_vacate(queue->q_exts_by_addr, NULL, queue); | |
3802 | range_tree_destroy(queue->q_exts_by_addr); | |
3803 | avl_destroy(&queue->q_sios_by_addr); | |
3804 | cv_destroy(&queue->q_zio_cv); | |
428870ff | 3805 | |
d4a72f23 TC |
3806 | kmem_free(queue, sizeof (*queue)); |
3807 | } | |
0ea05c64 | 3808 | |
d4a72f23 TC |
3809 | /* |
3810 | * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is | |
3811 | * called on behalf of vdev_top_transfer when creating or destroying | |
3812 | * a mirror vdev due to zpool attach/detach. | |
3813 | */ | |
3814 | void | |
3815 | dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd) | |
3816 | { | |
3817 | mutex_enter(&svd->vdev_scan_io_queue_lock); | |
3818 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
3819 | ||
3820 | VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL); | |
3821 | tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue; | |
3822 | svd->vdev_scan_io_queue = NULL; | |
a1d477c2 | 3823 | if (tvd->vdev_scan_io_queue != NULL) |
d4a72f23 | 3824 | tvd->vdev_scan_io_queue->q_vd = tvd; |
0ea05c64 | 3825 | |
d4a72f23 TC |
3826 | mutex_exit(&tvd->vdev_scan_io_queue_lock); |
3827 | mutex_exit(&svd->vdev_scan_io_queue_lock); | |
428870ff | 3828 | } |
c409e464 | 3829 | |
d4a72f23 TC |
3830 | static void |
3831 | scan_io_queues_destroy(dsl_scan_t *scn) | |
784d15c1 | 3832 | { |
d4a72f23 TC |
3833 | vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; |
3834 | ||
3835 | for (uint64_t i = 0; i < rvd->vdev_children; i++) { | |
3836 | vdev_t *tvd = rvd->vdev_child[i]; | |
3837 | ||
3838 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
3839 | if (tvd->vdev_scan_io_queue != NULL) | |
3840 | dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue); | |
3841 | tvd->vdev_scan_io_queue = NULL; | |
3842 | mutex_exit(&tvd->vdev_scan_io_queue_lock); | |
3843 | } | |
784d15c1 NR |
3844 | } |
3845 | ||
d4a72f23 TC |
3846 | static void |
3847 | dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) | |
3848 | { | |
3849 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
3850 | dsl_scan_t *scn = dp->dp_scan; | |
3851 | vdev_t *vdev; | |
3852 | kmutex_t *q_lock; | |
3853 | dsl_scan_io_queue_t *queue; | |
3854 | scan_io_t srch, *sio; | |
3855 | avl_index_t idx; | |
3856 | uint64_t start, size; | |
3857 | ||
3858 | vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i])); | |
3859 | ASSERT(vdev != NULL); | |
3860 | q_lock = &vdev->vdev_scan_io_queue_lock; | |
3861 | queue = vdev->vdev_scan_io_queue; | |
3862 | ||
3863 | mutex_enter(q_lock); | |
3864 | if (queue == NULL) { | |
3865 | mutex_exit(q_lock); | |
3866 | return; | |
3867 | } | |
3868 | ||
3869 | bp2sio(bp, &srch, dva_i); | |
3870 | start = srch.sio_offset; | |
3871 | size = srch.sio_asize; | |
3872 | ||
3873 | /* | |
3874 | * We can find the zio in two states: | |
3875 | * 1) Cold, just sitting in the queue of zio's to be issued at | |
3876 | * some point in the future. In this case, all we do is | |
3877 | * remove the zio from the q_sios_by_addr tree, decrement | |
3878 | * its data volume from the containing range_seg_t and | |
3879 | * resort the q_exts_by_size tree to reflect that the | |
3880 | * range_seg_t has lost some of its 'fill'. We don't shorten | |
3881 | * the range_seg_t - this is usually rare enough not to be | |
3882 | * worth the extra hassle of trying to keep track of precise | |
3883 | * extent boundaries. | |
3884 | * 2) Hot, where the zio is currently in-flight in | |
3885 | * scan_io_queue_issue(). In this case, we can't simply | |
3886 | * reach in and stop the in-flight zio's, so we instead | |
3887 | * block the caller. Eventually, scan_io_queue_issue() will | |
3888 | * be done with issuing the zio's it gathered and will | |
3889 | * signal us. | |
3890 | */ | |
3891 | sio = avl_find(&queue->q_sios_by_addr, &srch, &idx); | |
3892 | if (sio != NULL) { | |
3893 | int64_t asize = sio->sio_asize; | |
3894 | blkptr_t tmpbp; | |
3895 | ||
3896 | /* Got it while it was cold in the queue */ | |
3897 | ASSERT3U(start, ==, sio->sio_offset); | |
3898 | ASSERT3U(size, ==, asize); | |
3899 | avl_remove(&queue->q_sios_by_addr, sio); | |
c409e464 | 3900 | |
d4a72f23 TC |
3901 | ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); |
3902 | range_tree_remove_fill(queue->q_exts_by_addr, start, size); | |
3903 | ||
3904 | /* | |
3905 | * We only update scn_bytes_pending in the cold path, | |
3906 | * otherwise it will already have been accounted for as | |
3907 | * part of the zio's execution. | |
3908 | */ | |
3909 | atomic_add_64(&scn->scn_bytes_pending, -asize); | |
c409e464 | 3910 | |
d4a72f23 TC |
3911 | /* count the block as though we issued it */ |
3912 | sio2bp(sio, &tmpbp, dva_i); | |
3913 | count_block(scn, dp->dp_blkstats, &tmpbp); | |
c409e464 | 3914 | |
d4a72f23 TC |
3915 | kmem_cache_free(sio_cache, sio); |
3916 | } | |
3917 | mutex_exit(q_lock); | |
3918 | } | |
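/*
 * Illustrative sketch (hypothetical; compiled out): the "cold" path
 * above, reduced to its accounting. Removing a queued I/O must undo
 * both the extent's fill and the global pending-bytes counter; the
 * extent boundaries are deliberately left unshrunk, matching the
 * comment above. toy_queue_t is an invented stand-in.
 */
#if 0
#include <stdint.h>

typedef struct {
	uint64_t seg_fill;	/* stands in for range_seg_t rs_fill */
	int64_t bytes_pending;	/* stands in for scn_bytes_pending */
} toy_queue_t;

static void
toy_dequeue_freed(toy_queue_t *q, uint64_t asize)
{
	q->seg_fill -= asize;		/* like range_tree_remove_fill() */
	q->bytes_pending -= asize;	/* cold path only: not yet issued */
}
#endif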
c409e464 | 3919 | |
d4a72f23 TC |
3920 | /* |
3921 | * Callback invoked when a zio_free() zio is executing. This needs to be | |
3922 | * intercepted to prevent the zio from deallocating a particular portion | |
3923 | * of disk space that could then be reallocated and written to while we | |
3924 | * still have it queued up for processing. | |
3925 | */ | |
3926 | void | |
3927 | dsl_scan_freed(spa_t *spa, const blkptr_t *bp) | |
3928 | { | |
3929 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
3930 | dsl_scan_t *scn = dp->dp_scan; | |
3931 | ||
3932 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
3933 | ASSERT(scn != NULL); | |
3934 | if (!dsl_scan_is_running(scn)) | |
3935 | return; | |
3936 | ||
3937 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) | |
3938 | dsl_scan_freed_dva(spa, bp, i); | |
3939 | } | |
3940 | ||
93ce2b4c | 3941 | #if defined(_KERNEL) |
d4a72f23 TC |
3942 | /* CSTYLED */ |
3943 | module_param(zfs_scan_vdev_limit, ulong, 0644); | |
3944 | MODULE_PARM_DESC(zfs_scan_vdev_limit, | |
3945 | "Max bytes in flight per leaf vdev for scrubs and resilvers"); | |
3946 | ||
3947 | module_param(zfs_scrub_min_time_ms, int, 0644); | |
3948 | MODULE_PARM_DESC(zfs_scrub_min_time_ms, "Min millisecs to scrub per txg"); | |
c409e464 | 3949 | |
a1d477c2 MA |
3950 | module_param(zfs_obsolete_min_time_ms, int, 0644); |
3951 | MODULE_PARM_DESC(zfs_obsolete_min_time_ms, "Min millisecs to obsolete per txg"); | |
3952 | ||
c409e464 BB |
3953 | module_param(zfs_free_min_time_ms, int, 0644); |
3954 | MODULE_PARM_DESC(zfs_free_min_time_ms, "Min millisecs to free per txg"); | |
3955 | ||
3956 | module_param(zfs_resilver_min_time_ms, int, 0644); | |
3957 | MODULE_PARM_DESC(zfs_resilver_min_time_ms, "Min millisecs to resilver per txg"); | |
3958 | ||
3959 | module_param(zfs_no_scrub_io, int, 0644); | |
3960 | MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O"); | |
3961 | ||
3962 | module_param(zfs_no_scrub_prefetch, int, 0644); | |
3963 | MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching"); | |
36283ca2 | 3964 | |
02730c33 | 3965 | /* CSTYLED */ |
a1d477c2 MA |
3966 | module_param(zfs_async_block_max_blocks, ulong, 0644); |
3967 | MODULE_PARM_DESC(zfs_async_block_max_blocks, | |
3968 | "Max number of blocks freed in one txg"); | |
ba5ad9a4 GW |
3969 | |
3970 | module_param(zfs_free_bpobj_enabled, int, 0644); | |
3971 | MODULE_PARM_DESC(zfs_free_bpobj_enabled, "Enable processing of the free_bpobj"); | |
d4a72f23 TC |
3972 | |
3973 | module_param(zfs_scan_mem_lim_fact, int, 0644); | |
3974 | MODULE_PARM_DESC(zfs_scan_mem_lim_fact, "Fraction of RAM for scan hard limit"); | |
3975 | ||
3976 | module_param(zfs_scan_issue_strategy, int, 0644); | |
3977 | MODULE_PARM_DESC(zfs_scan_issue_strategy, | |
3978 | "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size"); | |
3979 | ||
3980 | module_param(zfs_scan_legacy, int, 0644); | |
3981 | MODULE_PARM_DESC(zfs_scan_legacy, "Scrub using legacy non-sequential method"); | |
3982 | ||
3983 | module_param(zfs_scan_checkpoint_intval, int, 0644); | |
3984 | MODULE_PARM_DESC(zfs_scan_checkpoint_intval, | |
3985 | "Scan progress on-disk checkpointing interval"); | |
3986 | ||
63f88c12 | 3987 | /* CSTYLED */ |
3988 | module_param(zfs_scan_max_ext_gap, ulong, 0644); | |
3989 | MODULE_PARM_DESC(zfs_scan_max_ext_gap, | |
3990 | "Max gap in bytes between sequential scrub / resilver I/Os"); | |
3991 | ||
d4a72f23 TC |
3992 | module_param(zfs_scan_mem_lim_soft_fact, int, 0644); |
3993 | MODULE_PARM_DESC(zfs_scan_mem_lim_soft_fact, | |
3994 | "Fraction of hard limit used as soft limit"); | |
3995 | ||
3996 | module_param(zfs_scan_strict_mem_lim, int, 0644); | |
3997 | MODULE_PARM_DESC(zfs_scan_strict_mem_lim, | |
3998 | "Tunable to attempt to reduce lock contention"); | |
3999 | ||
4000 | module_param(zfs_scan_fill_weight, int, 0644); | |
4001 | MODULE_PARM_DESC(zfs_scan_fill_weight, | |
4002 | "Tunable to adjust bias towards more filled segments during scans"); | |
c409e464 | 4003 | #endif |