/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#include <sys/dbuf.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/Os issued, since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */
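
/*
 * For a concrete sense of the checkpoint cadence (illustrative): with the
 * default zfs_scan_checkpoint_intval of 7200 seconds defined below, a
 * sorted scan drains its queues and writes such a checkpoint bookmark
 * roughly once every two hours.
 */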

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(spa_t *spa);
static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);

extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;

/*
 * 'zpool status' uses bytes processed per pass to report throughput and
 * estimate time remaining. We define a pass to start when the scanning
 * phase completes for a sequential resilver. Optionally, this value
 * may be used to reset the pass statistics every N txgs to provide an
 * estimated completion time based on currently observed performance.
 */
static uint_t zfs_scan_report_txgs = 0;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
static int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of concurrently in-flight bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues, which would cause long
 * latency and therefore long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
static uint64_t zfs_scan_vdev_limit = 16 << 20;
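
/*
 * Worked example (illustrative numbers): dsl_scan_init() below computes
 * scn_maxinflight_bytes as MIN(arc_c_max / 4, MAX(1MB,
 * zfs_scan_vdev_limit * number of data disks)), so with the 16MB default
 * here and 8 data disks the pool-wide scanning cap works out to 128MB,
 * subject to the arc_c_max / 4 ceiling.
 */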

static uint_t zfs_scan_issue_strategy = 0;

/* don't queue & sort zios, go direct */
static int zfs_scan_legacy = B_FALSE;
static uint64_t zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */

/* fraction of physmem */
static uint_t zfs_scan_mem_lim_fact = 20;

/* fraction of mem lim above */
static uint_t zfs_scan_mem_lim_soft_fact = 20;
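
/*
 * Worked example (illustrative; see dsl_scan_should_clear() for the
 * authoritative math): with the defaults above on a 16GB machine, the
 * hard limit is roughly physmem / 20 ~= 820MB (never below
 * zfs_scan_mem_lim_min) and the soft limit is roughly a further
 * twentieth of that, capped at zfs_scan_mem_lim_soft_max (128MB).
 */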

/* minimum milliseconds to scrub per txg */
static uint_t zfs_scrub_min_time_ms = 1000;

/* minimum milliseconds to obsolete per txg */
static uint_t zfs_obsolete_min_time_ms = 500;

/* minimum milliseconds to free per txg */
static uint_t zfs_free_min_time_ms = 1000;

/* minimum milliseconds to resilver per txg */
static uint_t zfs_resilver_min_time_ms = 3000;

static uint_t zfs_scan_checkpoint_intval = 7200;	/* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
static uint64_t zfs_max_async_dedup_frees = 100000;

/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

/*
 * Enable/disable the processing of the free_bpobj object.
 */
static int zfs_free_bpobj_enabled = 1;

/* Error blocks to be scrubbed in one txg. */
unsigned long zfs_scrub_error_blocks_per_txg = 1 << 12;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
 * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_nr_dvas;

	/* fields from zio_t */
	uint32_t		sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node;	/* link into issuing queue */
		list_node_t	sio_list_node;	/* link for issuing to disk */
	} sio_nodes;

	/*
	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
	 * depending on how many were in the original bp. Only the
	 * first DVA is really used for sorting and issuing purposes.
	 * The other DVAs (if provided) simply exist so that the zio
	 * layer can find additional copies to repair from in the
	 * event of an error. This array must go at the end of the
	 * struct to allow this for the variable number of elements.
	 */
	dva_t			sio_dva[];
} scan_io_t;

#define	SIO_SET_OFFSET(sio, x)		DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define	SIO_SET_ASIZE(sio, x)		DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define	SIO_GET_OFFSET(sio)		DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define	SIO_GET_ASIZE(sio)		DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define	SIO_GET_END_OFFSET(sio)		\
	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define	SIO_GET_MUSED(sio)		\
	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
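
/*
 * Illustrative arithmetic: a dva_t is two 64-bit words (16 bytes), so
 * SIO_GET_MUSED() charges a fully populated 3-DVA sio as
 * sizeof (scan_io_t) + 48 bytes against q_sio_memused below.
 */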

struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
	vdev_t		*q_vd; /* top-level vdev that this queue represents */
	zio_t		*q_zio; /* scn_zio_root child for waiting on IO */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	zfs_btree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;
	uint64_t	q_sio_memused;
	uint64_t	q_last_ext_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};
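
/*
 * Note (descriptive): q_exts_by_addr and q_exts_by_size index the same
 * extents two ways -- by LBA, so that nearby I/Os can be merged into
 * contiguous extents, and by (fill-weighted) size, so that the issuing
 * code can drain the largest, most contiguous extents first.
 */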

/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];

/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}

/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
	ASSERT3U(nr_dvas, >, 0);
	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);

	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
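
/*
 * Typical pairing (an illustrative sketch; the real callers appear later
 * in this file): bp2sio() fills in sio_nr_dvas, which sio_free() then
 * uses to return the sio to the matching fixed-size cache.
 *
 *	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
 *	bp2sio(bp, sio, dva_i);
 *	...
 *	sio_free(sio);
 */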

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		char name[36];

		(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
		sio_cache[i] = kmem_cache_create(name,
		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
		    0, NULL, NULL, NULL, NULL, NULL, 0);
	}
}

void
scan_fini(void)
{
	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		kmem_cache_destroy(sio_cache[i]);
	}
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
	memset(bp, 0, sizeof (*bp));
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;

	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
	sio->sio_nr_dvas = BP_GET_NDVAS(bp);

	/*
	 * Copy the DVAs to the sio. We need all copies of the block so
	 * that the self healing code can use the alternate copies if the
	 * first is corrupted. We want the DVA at index dva_i to be first
	 * in the sio since this is the primary one that we want to issue.
	 */
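	/*
	 * For example (illustrative): with dva_i == 1 on a 3-DVA bp, the
	 * modular rotation below stores the copies in the order 1, 2, 0.
	 */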
	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
	}
}

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
	 * Limits for the issuing phase are done per top-level vdev and
	 * are handled separately.
	 */
	scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
	    zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));

	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress for %s; "
		    "restarting new-style scrub in txg %llu",
		    spa->spa_name,
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
		    ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);

		if (err != 0 && err != ENOENT)
			return (err);

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);

		/*
		 * Detect if the pool contains the signature of #2094. If
		 * it does, update the scn->scn_phys structure accordingly
		 * and notify the administrator by setting an errata for
		 * the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				memcpy(&scn->scn_phys, zaptmp,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub for %s was modified "
			    "by old software; restarting in txg %llu",
			    spa->spa_name,
			    (longlong_t)scn->scn_restart_txg);
		} else if (dsl_scan_resilvering(dp)) {
			/*
			 * If a resilver is in progress and there are already
			 * errors, restart it instead of finishing this scan and
			 * then restarting it. If there haven't been any errors
			 * then remember that the incore DTL is valid.
			 */
			if (scn->scn_phys.scn_errors > 0) {
				scn->scn_restart_txg = txg;
				zfs_dbgmsg("resilver can't excise DTL_MISSING "
				    "when finished; restarting on %s in txg "
				    "%llu",
				    spa->spa_name,
				    (u_longlong_t)scn->scn_restart_txg);
			} else {
				/* it's safe to excise DTL when finished */
				spa->spa_scrub_started = B_TRUE;
			}
		}
	}

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	vdev_scan_stat_init(spa->spa_root_vdev);

	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);

		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		scan_ds_prefetch_queue_clear(scn);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
	return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
	    (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
{
	dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;

	return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
	    errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
}

boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
{
	return (dsl_errorscrubbing(scn->scn_dp) &&
	    scn->errorscrub_phys.dep_paused_flags);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scn->errorscrub_phys.dep_cursor =
	    zap_cursor_serialize(&scn->errorscrub_cursor);

	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
	    &scn->errorscrub_phys, tx));
}

static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(!dsl_errorscrubbing(scn->scn_dp));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);

	memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
	scn->errorscrub_phys.dep_func = *funcp;
	scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
	scn->errorscrub_phys.dep_start_time = gethrestime_sec();
	scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
	scn->errorscrub_phys.dep_examined = 0;
	scn->errorscrub_phys.dep_errors = 0;
	scn->errorscrub_phys.dep_cursor = 0;
	zap_cursor_init_serialized(&scn->errorscrub_cursor,
	    spa->spa_meta_objset, spa->spa_errlog_last,
	    scn->errorscrub_phys.dep_cursor);

	vdev_config_dirty(spa->spa_root_vdev);
	spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);

	dsl_errorscrub_sync_state(scn, tx);

	spa_history_log_internal(spa, "error scrub setup", tx,
	    "func=%u mintxg=%u maxtxg=%llu",
	    *funcp, 0, (u_longlong_t)tx->tx_txg);
}

static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
		return (SET_ERROR(EBUSY));
	}

	if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
		return (ECANCELED);
	}
	return (0);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_queues_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
	if (scn->scn_queues_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
			    NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		memcpy(&scn->scn_phys_cached, &scn->scn_phys,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint for %s",
			    spa->spa_name);

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}

int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
	    dsl_errorscrubbing(scn->scn_dp))
		return (SET_ERROR(EBUSY));

	return (0);
}

void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));

	/*
	 * If we are starting a fresh scrub, we erase the error scrub
	 * information from disk.
	 */
	memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
	dsl_errorscrub_sync_state(scn, tx);

	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);
	vdev_scan_stat_init(spa->spa_root_vdev);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			nvlist_t *aux = fnvlist_alloc();
			fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
			    "healing");
			spa_event_notify(spa, NULL, aux,
			    ESC_ZFS_RESILVER_START);
			nvlist_free(aux);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

		/*
		 * When starting a resilver clear any existing rebuild state.
		 * This is required to prevent stale rebuild status from
		 * being reported when a rebuild is run, then a resilver and
		 * finally a scrub, in which case only the scrub status
		 * should be reported by 'zpool status'.
		 */
		if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
			vdev_t *rvd = spa->spa_root_vdev;
			for (uint64_t i = 0; i < rvd->vdev_children; i++) {
				vdev_t *vd = rvd->vdev_child[i];
				vdev_rebuild_clear_sync(
				    (void *)(uintptr_t)vd->vdev_id, tx);
			}
		}
	}

	/* back to the generic stuff */

	if (zfs_scan_blkstats) {
		if (dp->dp_blkstats == NULL) {
			dp->dp_blkstats =
			    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		}
		memset(&dp->dp_blkstats->zab_type, 0,
		    sizeof (dp->dp_blkstats->zab_type));
	} else {
		if (dp->dp_blkstats) {
			vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
			dp->dp_blkstats = NULL;
		}
	}

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
	    (u_longlong_t)scn->scn_phys.scn_max_txg);
}

/*
 * Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
 * error scrub or resilver. Can also be called to resume a paused scrub or
 * error scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_RESILVER) {
		dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
		return (0);
	}

	if (func == POOL_SCAN_ERRORSCRUB) {
		if (dsl_errorscrub_is_paused(dp->dp_scan)) {
			/*
			 * got error scrub start cmd, resume paused error scrub.
			 */
			int err = dsl_scrub_set_pause_resume(scn->scn_dp,
			    POOL_SCRUB_NORMAL);
			if (err == 0) {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_ERRORSCRUB_RESUME);
				return (ECANCELED);
			}
			return (SET_ERROR(err));
		}

		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
		    &func, 0, ZFS_SPACE_CHECK_RESERVED));
	}

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (SET_ERROR(ECANCELED));
		}
		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	if (complete) {
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
		spa_history_log_internal(spa, "error scrub done", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	} else {
		spa_history_log_internal(spa, "error scrub canceled", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	}

	scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
	spa->spa_scrub_active = B_FALSE;
	spa_errlog_rotate(spa);
	scn->errorscrub_phys.dep_end_time = gethrestime_sec();
	zap_cursor_fini(&scn->errorscrub_cursor);

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_errorscrubbing(scn->scn_dp));
}

static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);
	scan_ds_prefetch_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	spa_notify_waiters(spa);

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);

			if (scn->scn_phys.scn_min_txg) {
				nvlist_t *aux = fnvlist_alloc();
				fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
				    "healing");
				spa_event_notify(spa, NULL, aux,
				    ESC_ZFS_RESILVER_FINISH);
				nvlist_free(aux);
			} else {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_SCRUB_FINISH);
			}
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE, B_FALSE);
		}
		spa_errlog_rotate(spa);

		/*
		 * Don't clear flag until after vdev_dtl_reassess to ensure that
		 * DTL_MISSING will get updated when possible.
		 */
		spa->spa_scrub_started = B_FALSE;

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

		/*
		 * Clear any resilver_deferred flags in the config.
		 * If there are drives that need resilvering, kick
		 * off an asynchronous request to start resilver.
		 * vdev_clear_resilver_deferred() may update the config
		 * before the resilver can restart. In the event of
		 * a crash during this period, the spa loading code
		 * will find the drives that need to be resilvered
		 * and start the resilver then.
		 */
		if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
		    vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
			spa_history_log_internal(spa,
			    "starting deferred resilver", tx, "errors=%llu",
			    (u_longlong_t)spa_approx_errlog_size(spa));
			spa_async_request(spa, SPA_ASYNC_RESILVER);
		}

		/* Clear recent error events (i.e. duplicate events tracking) */
		if (complete)
			zfs_ereport_clear(spa, NULL);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_scan_is_running(scn));
}

static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/*
		 * can't pause an error scrub when there is no in-progress
		 * error scrub.
		 */
		if (!dsl_errorscrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused error scrub */
		if (dsl_errorscrub_is_paused(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
		scn->errorscrub_phys.dep_paused_flags = B_TRUE;
		dsl_errorscrub_sync_state(scn, tx);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_errorscrub_is_paused(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the error scrub
			 * rate shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_errorscrub_spent_paused +=
			    gethrestime_sec() -
			    spa->spa_scan_pass_errorscrub_pause;

			spa->spa_scan_pass_errorscrub_pause = 0;
			scn->errorscrub_phys.dep_paused_flags = B_FALSE;

			zap_cursor_init_serialized(
			    &scn->errorscrub_cursor,
			    spa->spa_meta_objset, spa->spa_errlog_last,
			    scn->errorscrub_phys.dep_cursor);

			dsl_errorscrub_sync_state(scn, tx);
		}
	}
}

static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	/* can't cancel an error scrub when there is none in progress */
	if (!dsl_errorscrubbing(scn->scn_dp))
		return (SET_ERROR(ENOENT));
	return (0);
}

static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_errorscrub_done(scn, B_FALSE, tx);
	dsl_errorscrub_sync_state(scn, tx);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
	    ESC_ZFS_ERRORSCRUB_ABORT);
}

static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	if (dsl_errorscrubbing(dp)) {
		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
		    NULL, 3, ZFS_SPACE_CHECK_RESERVED));
	}
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
1315 | ||
0ea05c64 AP |
1316 | static int |
1317 | dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx) | |
1318 | { | |
1319 | pool_scrub_cmd_t *cmd = arg; | |
1320 | dsl_pool_t *dp = dmu_tx_pool(tx); | |
1321 | dsl_scan_t *scn = dp->dp_scan; | |
1322 | ||
1323 | if (*cmd == POOL_SCRUB_PAUSE) { | |
1324 | /* can't pause a scrub when there is no in-progress scrub */ | |
1325 | if (!dsl_scan_scrubbing(dp)) | |
1326 | return (SET_ERROR(ENOENT)); | |
1327 | ||
1328 | /* can't pause a paused scrub */ | |
1329 | if (dsl_scan_is_paused_scrub(scn)) | |
1330 | return (SET_ERROR(EBUSY)); | |
1331 | } else if (*cmd != POOL_SCRUB_NORMAL) { | |
1332 | return (SET_ERROR(ENOTSUP)); | |
1333 | } | |
1334 | ||
1335 | return (0); | |
1336 | } | |
1337 | ||
1338 | static void | |
1339 | dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx) | |
1340 | { | |
1341 | pool_scrub_cmd_t *cmd = arg; | |
1342 | dsl_pool_t *dp = dmu_tx_pool(tx); | |
1343 | spa_t *spa = dp->dp_spa; | |
1344 | dsl_scan_t *scn = dp->dp_scan; | |
1345 | ||
0ea05c64 AP |
1346 | if (*cmd == POOL_SCRUB_PAUSE) { |
1347 | /* note the time at which the in-progress scrub was paused */ | |
1348 | spa->spa_scan_pass_scrub_pause = gethrestime_sec(); | |
1349 | scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED; | |
8cb119e3 | 1350 | scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED; |
d4a72f23 | 1351 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); |
43cb30b3 | 1352 | spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED); |
e60e158e | 1353 | spa_notify_waiters(spa); |
0ea05c64 AP |
1354 | } else { |
1355 | ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL); | |
1356 | if (dsl_scan_is_paused_scrub(scn)) { | |
1357 | /* | |
1358 | * We need to keep track of how much time we spend | |
1359 | * paused per pass so that we can adjust the scrub rate | |
1360 | * shown in the output of 'zpool status'. | |
1361 | */ | |
1362 | spa->spa_scan_pass_scrub_spent_paused += | |
1363 | gethrestime_sec() - spa->spa_scan_pass_scrub_pause; | |
1364 | spa->spa_scan_pass_scrub_pause = 0; | |
1365 | scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; | |
8cb119e3 | 1366 | scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED; |
d4a72f23 | 1367 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); |
0ea05c64 AP |
1368 | } |
1369 | } | |
1370 | } | |
1371 | ||
1372 | /* | |
1373 | * Set scrub pause/resume state if it makes sense to do so. | |
1374 | */ | |
1375 | int | |
1376 | dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd) | |
1377 | { | |
482eeef8 GA |
1378 | if (dsl_errorscrubbing(dp)) { |
1379 | return (dsl_sync_task(spa_name(dp->dp_spa), | |
1380 | dsl_errorscrub_pause_resume_check, | |
1381 | dsl_errorscrub_pause_resume_sync, &cmd, 3, | |
1382 | ZFS_SPACE_CHECK_RESERVED)); | |
1383 | } | |
0ea05c64 AP |
1384 | return (dsl_sync_task(spa_name(dp->dp_spa), |
1385 | dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3, | |
1386 | ZFS_SPACE_CHECK_RESERVED)); | |
1387 | } | |
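/*
 * Illustrative usage (a sketch, not existing code): pausing and later
 * resuming a scrub from a kernel-side caller. The ioctl plumbing that
 * normally sits above this entry point is omitted.
 */
#if 0	/* example only */
	/* pause: returns ENOENT if no scrub, EBUSY if already paused */
	error = dsl_scrub_set_pause_resume(dp, POOL_SCRUB_PAUSE);

	/* resume a paused scrub */
	error = dsl_scrub_set_pause_resume(dp, POOL_SCRUB_NORMAL);
#endif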
1388 | ||
0ea05c64 | 1389 | |
d4a72f23 TC |
1390 | /* start a new resilver, or restart an existing one. */ | |
1391 | void | |
3c819a2c | 1392 | dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg) |
d4a72f23 TC |
1393 | { |
1394 | if (txg == 0) { | |
1395 | dmu_tx_t *tx; | |
1396 | tx = dmu_tx_create_dd(dp->dp_mos_dir); | |
1397 | VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT)); | |
0ea05c64 | 1398 | |
d4a72f23 TC |
1399 | txg = dmu_tx_get_txg(tx); |
1400 | dp->dp_scan->scn_restart_txg = txg; | |
1401 | dmu_tx_commit(tx); | |
1402 | } else { | |
1403 | dp->dp_scan->scn_restart_txg = txg; | |
1404 | } | |
6f57f1e3 RE |
1405 | zfs_dbgmsg("restarting resilver for %s at txg=%llu", |
1406 | dp->dp_spa->spa_name, (longlong_t)txg); | |
0ea05c64 AP |
1407 | } |
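/*
 * Illustrative usage (sketch): callers in syncing context that know
 * the relevant txg pass it in directly; open-context callers pass 0
 * and let the function discover the currently open txg by assigning
 * a throwaway transaction, as above.
 */
#if 0	/* example only */
	/* syncing context, txg known: */
	dsl_scan_restart_resilver(dp, dmu_tx_get_txg(tx));

	/* open context, txg unknown: */
	dsl_scan_restart_resilver(dp, 0);
#endif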
1408 | ||
428870ff BB |
1409 | void |
1410 | dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp) | |
1411 | { | |
1412 | zio_free(dp->dp_spa, txg, bp); | |
1413 | } | |
1414 | ||
1415 | void | |
1416 | dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp) | |
1417 | { | |
1418 | ASSERT(dsl_pool_sync_context(dp)); | |
1419 | zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags)); | |
1420 | } | |
1421 | ||
d4a72f23 TC |
1422 | static int |
1423 | scan_ds_queue_compare(const void *a, const void *b) | |
428870ff | 1424 | { |
d4a72f23 TC |
1425 | const scan_ds_t *sds_a = a, *sds_b = b; |
1426 | ||
1427 | if (sds_a->sds_dsobj < sds_b->sds_dsobj) | |
1428 | return (-1); | |
1429 | if (sds_a->sds_dsobj == sds_b->sds_dsobj) | |
1430 | return (0); | |
1431 | return (1); | |
428870ff BB |
1432 | } |
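/*
 * This comparator keys the in-memory dataset queue strictly by object
 * number, so each dataset occupies a single, unique slot. A sketch of
 * how such a tree is set up (assuming the sds_node AVL linkage field
 * declared for scan_ds_t in dsl_scan.h):
 */
#if 0	/* example only */
	avl_create(&scn->scn_queue, scan_ds_queue_compare,
	    sizeof (scan_ds_t), offsetof(scan_ds_t, sds_node));
#endif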
1433 | ||
1434 | static void | |
d4a72f23 TC |
1435 | scan_ds_queue_clear(dsl_scan_t *scn) |
1436 | { | |
1437 | void *cookie = NULL; | |
1438 | scan_ds_t *sds; | |
1439 | while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) { | |
1440 | kmem_free(sds, sizeof (*sds)); | |
1441 | } | |
1442 | } | |
1443 | ||
1444 | static boolean_t | |
1445 | scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg) | |
428870ff | 1446 | { |
d4a72f23 TC |
1447 | scan_ds_t srch, *sds; |
1448 | ||
1449 | srch.sds_dsobj = dsobj; | |
1450 | sds = avl_find(&scn->scn_queue, &srch, NULL); | |
1451 | if (sds != NULL && txg != NULL) | |
1452 | *txg = sds->sds_txg; | |
1453 | return (sds != NULL); | |
428870ff BB |
1454 | } |
1455 | ||
d4a72f23 TC |
1456 | static void |
1457 | scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg) | |
1458 | { | |
1459 | scan_ds_t *sds; | |
1460 | avl_index_t where; | |
1461 | ||
1462 | sds = kmem_zalloc(sizeof (*sds), KM_SLEEP); | |
1463 | sds->sds_dsobj = dsobj; | |
1464 | sds->sds_txg = txg; | |
1465 | ||
1466 | VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL); | |
1467 | avl_insert(&scn->scn_queue, sds, where); | |
1468 | } | |
1469 | ||
1470 | static void | |
1471 | scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj) | |
1472 | { | |
1473 | scan_ds_t srch, *sds; | |
1474 | ||
1475 | srch.sds_dsobj = dsobj; | |
1476 | ||
1477 | sds = avl_find(&scn->scn_queue, &srch, NULL); | |
1478 | VERIFY(sds != NULL); | |
1479 | avl_remove(&scn->scn_queue, sds); | |
1480 | kmem_free(sds, sizeof (*sds)); | |
1481 | } | |
1482 | ||
1483 | static void | |
1484 | scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx) | |
1485 | { | |
1486 | dsl_pool_t *dp = scn->scn_dp; | |
1487 | spa_t *spa = dp->dp_spa; | |
1488 | dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ? | |
1489 | DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER; | |
1490 | ||
1c0c729a | 1491 | ASSERT0(scn->scn_queues_pending); |
d4a72f23 TC |
1492 | ASSERT(scn->scn_phys.scn_queue_obj != 0); |
1493 | ||
1494 | VERIFY0(dmu_object_free(dp->dp_meta_objset, | |
1495 | scn->scn_phys.scn_queue_obj, tx)); | |
1496 | scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot, | |
1497 | DMU_OT_NONE, 0, tx); | |
1498 | for (scan_ds_t *sds = avl_first(&scn->scn_queue); | |
1499 | sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) { | |
1500 | VERIFY0(zap_add_int_key(dp->dp_meta_objset, | |
1501 | scn->scn_phys.scn_queue_obj, sds->sds_dsobj, | |
1502 | sds->sds_txg, tx)); | |
1503 | } | |
1504 | } | |
1505 | ||
1506 | /* | |
1507 | * Computes the memory limit state that we're currently in. A sorted scan | |
1508 | * needs quite a bit of memory to hold the sorting queue, so we need to | |
1509 | * reasonably constrain the size so it doesn't impact overall system | |
1510 | * performance. We compute two limits: | |
1511 | * 1) Hard memory limit: if the amount of memory used by the sorting | |
1512 | * queues on a pool gets above this value, we stop the metadata | |
1513 | * scanning portion and start issuing the queued up and sorted | |
1514 | * I/Os to reduce memory usage. | |
1515 | * This limit is calculated as a fraction of physmem (by default 5%). | |
1516 | * We constrain the lower bound of the hard limit to an absolute | |
1517 | * minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain | |
1518 | * the upper bound to 5% of the total pool size - no chance we'll | |
1519 | * ever need that much memory, but just to keep the value in check. | |
1520 | * 2) Soft memory limit: once we hit the hard memory limit, we start | |
1521 | * issuing I/O to reduce queue memory usage, but we don't want to | |
1522 | * completely empty out the queues, since we might be able to find I/Os | |
1523 | * that will fill in the gaps of our non-sequential IOs at some point | |
1524 | * in the future. So we stop the issuing of I/Os once the amount of | |
1525 | * memory used drops below the soft limit (at which point we stop issuing | |
1526 | * I/O and start scanning metadata again). | |
1527 | * | |
1528 | * This limit is calculated by subtracting a fraction of the hard | |
1529 | * limit from the hard limit. By default this fraction is 5%, so | |
1530 | * the soft limit is 95% of the hard limit. We cap the size of the | |
1531 | * difference between the hard and soft limits at an absolute | |
1532 | * maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is | |
1533 | * sufficient to not cause too frequent switching between the | |
1534 | * metadata scan and I/O issue (even at 2k recordsize, 128 MiB's | |
1535 | * worth of queues is about 1.2 GiB of on-pool data, so scanning | |
1536 | * that should take at least a decent fraction of a second). | |
1537 | */ | |
1538 | static boolean_t | |
1539 | dsl_scan_should_clear(dsl_scan_t *scn) | |
1540 | { | |
fa130e01 | 1541 | spa_t *spa = scn->scn_dp->dp_spa; |
d4a72f23 | 1542 | vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; |
fa130e01 AM |
1543 | uint64_t alloc, mlim_hard, mlim_soft, mused; |
1544 | ||
1545 | alloc = metaslab_class_get_alloc(spa_normal_class(spa)); | |
1546 | alloc += metaslab_class_get_alloc(spa_special_class(spa)); | |
1547 | alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); | |
d4a72f23 TC |
1548 | |
1549 | mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE, | |
1550 | zfs_scan_mem_lim_min); | |
1551 | mlim_hard = MIN(mlim_hard, alloc / 20); | |
1552 | mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact, | |
1553 | zfs_scan_mem_lim_soft_max); | |
1554 | mused = 0; | |
1555 | for (uint64_t i = 0; i < rvd->vdev_children; i++) { | |
1556 | vdev_t *tvd = rvd->vdev_child[i]; | |
1557 | dsl_scan_io_queue_t *queue; | |
1558 | ||
1559 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
1560 | queue = tvd->vdev_scan_io_queue; | |
1561 | if (queue != NULL) { | |
87b46d63 | 1562 | /* |
1c0c729a | 1563 | * # of extents in exts_by_addr = # in exts_by_size. |
87b46d63 AM |
1564 | * B-tree efficiency is ~75%, but can be as low as 50%. |
1565 | */ | |
ca577779 | 1566 | mused += zfs_btree_numnodes(&queue->q_exts_by_size) * |
1c0c729a AM |
1567 | ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) * |
1568 | 3 / 2) + queue->q_sio_memused; | |
d4a72f23 TC |
1569 | } |
1570 | mutex_exit(&tvd->vdev_scan_io_queue_lock); | |
1571 | } | |
1572 | ||
1573 | dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused); | |
1574 | ||
1575 | if (mused == 0) | |
1c0c729a | 1576 | ASSERT0(scn->scn_queues_pending); |
d4a72f23 TC |
1577 | |
1578 | /* | |
1579 | * If we are above our hard limit, we need to clear out memory. | |
1580 | * If we are below our soft limit, we need to accumulate sequential IOs. | |
1581 | * Otherwise, we should keep doing whatever we are currently doing. | |
1582 | */ | |
1583 | if (mused >= mlim_hard) | |
1584 | return (B_TRUE); | |
1585 | else if (mused < mlim_soft) | |
1586 | return (B_FALSE); | |
1587 | else | |
1588 | return (scn->scn_clearing); | |
1589 | } | |
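/*
 * Worked example of the limit arithmetic above, using illustrative
 * numbers: 16 GiB of RAM, zfs_scan_mem_lim_fact = 20 (i.e. 5%),
 * 1 TiB allocated, zfs_scan_mem_lim_soft_fact = 20, and
 * zfs_scan_mem_lim_soft_max = 128 MiB:
 *
 *	mlim_hard = MAX(16 GiB / 20, 16 MiB)		= ~819 MiB
 *	mlim_hard = MIN(~819 MiB, 1 TiB / 20)		= ~819 MiB
 *	mlim_soft = ~819 MiB - MIN(~41 MiB, 128 MiB)	= ~778 MiB
 *
 * Queue memory may therefore grow to ~819 MiB before we switch from
 * scanning metadata to issuing I/O, and we keep issuing until usage
 * drops back below ~778 MiB.
 */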
10400bfe | 1590 | |
428870ff | 1591 | static boolean_t |
0ea05c64 | 1592 | dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) |
428870ff | 1593 | { |
428870ff BB |
1594 | /* we never skip user/group accounting objects */ |
1595 | if (zb && (int64_t)zb->zb_object < 0) | |
1596 | return (B_FALSE); | |
1597 | ||
0ea05c64 AP |
1598 | if (scn->scn_suspending) |
1599 | return (B_TRUE); /* we're already suspending */ | |
428870ff | 1600 | |
9ae529ec | 1601 | if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) |
428870ff BB |
1602 | return (B_FALSE); /* we're resuming */ |
1603 | ||
5815f7ac TC |
1604 | /* We only know how to resume from level-0 and objset blocks. */ |
1605 | if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL)) | |
428870ff BB |
1606 | return (B_FALSE); |
1607 | ||
10400bfe | 1608 | /* |
0ea05c64 | 1609 | * We suspend if: |
10400bfe MA |
1610 | * - we have scanned for at least the minimum time (default 1 sec |
1611 | * for scrub, 3 sec for resilver), and either we have sufficient | |
1612 | * dirty data that we are starting to write more quickly | |
d4a72f23 TC |
1613 | * (default 30%), someone is explicitly waiting for this txg |
1614 | * to complete, or we have used up all of the time in the txg | |
1615 | * timeout (default 5 sec). | |
10400bfe MA |
1616 | * or |
1617 | * - the spa is shutting down because this pool is being exported | |
1618 | * or the machine is rebooting. | |
d4a72f23 TC |
1619 | * or |
1620 | * - the scan queue has reached its memory use limit | |
10400bfe | 1621 | */ |
d4a72f23 TC |
1622 | uint64_t curr_time_ns = gethrtime(); |
1623 | uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; | |
1624 | uint64_t sync_time_ns = curr_time_ns - | |
1625 | scn->scn_dp->dp_spa->spa_sync_starttime; | |
1cd72b9c AM |
1626 | uint64_t dirty_min_bytes = zfs_dirty_data_max * |
1627 | zfs_vdev_async_write_active_min_dirty_percent / 100; | |
fdc2d303 | 1628 | uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? |
d4a72f23 TC |
1629 | zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; |
1630 | ||
1631 | if ((NSEC2MSEC(scan_time_ns) > mintime && | |
1cd72b9c | 1632 | (scn->scn_dp->dp_dirty_total >= dirty_min_bytes || |
d4a72f23 TC |
1633 | txg_sync_waiting(scn->scn_dp) || |
1634 | NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || | |
1635 | spa_shutting_down(scn->scn_dp->dp_spa) || | |
1636 | (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) { | |
5815f7ac TC |
1637 | if (zb && zb->zb_level == ZB_ROOT_LEVEL) { |
1638 | dprintf("suspending at first available bookmark " | |
1639 | "%llx/%llx/%llx/%llx\n", | |
1640 | (longlong_t)zb->zb_objset, | |
1641 | (longlong_t)zb->zb_object, | |
1642 | (longlong_t)zb->zb_level, | |
1643 | (longlong_t)zb->zb_blkid); | |
1644 | SET_BOOKMARK(&scn->scn_phys.scn_bookmark, | |
1645 | zb->zb_objset, 0, 0, 0); | |
1646 | } else if (zb != NULL) { | |
0ea05c64 | 1647 | dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n", |
428870ff BB |
1648 | (longlong_t)zb->zb_objset, |
1649 | (longlong_t)zb->zb_object, | |
1650 | (longlong_t)zb->zb_level, | |
1651 | (longlong_t)zb->zb_blkid); | |
1652 | scn->scn_phys.scn_bookmark = *zb; | |
d4a72f23 | 1653 | } else { |
21a4f5cc | 1654 | #ifdef ZFS_DEBUG |
d4a72f23 | 1655 | dsl_scan_phys_t *scnp = &scn->scn_phys; |
d4a72f23 TC |
1656 | dprintf("suspending at at DDT bookmark " |
1657 | "%llx/%llx/%llx/%llx\n", | |
1658 | (longlong_t)scnp->scn_ddt_bookmark.ddb_class, | |
1659 | (longlong_t)scnp->scn_ddt_bookmark.ddb_type, | |
1660 | (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, | |
1661 | (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); | |
21a4f5cc | 1662 | #endif |
428870ff | 1663 | } |
0ea05c64 | 1664 | scn->scn_suspending = B_TRUE; |
428870ff BB |
1665 | return (B_TRUE); |
1666 | } | |
1667 | return (B_FALSE); | |
1668 | } | |
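/*
 * Worked example with illustrative numbers: with zfs_scrub_min_time_ms
 * = 1000 and zfs_txg_timeout = 5, a scrub that has been visiting
 * blocks for 1.2 s inside a txg that has been syncing for 5.5 s
 * satisfies both the minimum-time and txg-timeout conditions above,
 * so we record a resume bookmark and set scn_suspending; traversal
 * picks up from that bookmark in a later txg.
 */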
1669 | ||
482eeef8 GA |
1670 | static boolean_t |
1671 | dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) | |
1672 | { | |
1673 | /* | |
1674 | * We suspend if: | |
1675 | * - we have scrubbed for at least the minimum time (default 1 sec | |
1676 | * for error scrub), someone is explicitly waiting for this txg | |
1677 | * to complete, or we have used up all of the time in the txg | |
1678 | * timeout (default 5 sec). | |
1679 | * or | |
1680 | * - the spa is shutting down because this pool is being exported | |
1681 | * or the machine is rebooting. | |
1682 | */ | |
1683 | uint64_t curr_time_ns = gethrtime(); | |
1684 | uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time; | |
1685 | uint64_t sync_time_ns = curr_time_ns - | |
1686 | scn->scn_dp->dp_spa->spa_sync_starttime; | |
1687 | int mintime = zfs_scrub_min_time_ms; | |
1688 | ||
1689 | if ((NSEC2MSEC(error_scrub_time_ns) > mintime && | |
1690 | (txg_sync_waiting(scn->scn_dp) || | |
1691 | NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || | |
1692 | spa_shutting_down(scn->scn_dp->dp_spa)) { | |
1693 | if (zb) { | |
1694 | dprintf("error scrub suspending at bookmark " | |
1695 | "%llx/%llx/%llx/%llx\n", | |
1696 | (longlong_t)zb->zb_objset, | |
1697 | (longlong_t)zb->zb_object, | |
1698 | (longlong_t)zb->zb_level, | |
1699 | (longlong_t)zb->zb_blkid); | |
1700 | } | |
1701 | return (B_TRUE); | |
1702 | } | |
1703 | return (B_FALSE); | |
1704 | } | |
1705 | ||
428870ff BB |
1706 | typedef struct zil_scan_arg { |
1707 | dsl_pool_t *zsa_dp; | |
1708 | zil_header_t *zsa_zh; | |
1709 | } zil_scan_arg_t; | |
1710 | ||
428870ff | 1711 | static int |
61868bb1 CS |
1712 | dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, |
1713 | uint64_t claim_txg) | |
428870ff | 1714 | { |
14e4e3cb | 1715 | (void) zilog; |
428870ff BB |
1716 | zil_scan_arg_t *zsa = arg; |
1717 | dsl_pool_t *dp = zsa->zsa_dp; | |
1718 | dsl_scan_t *scn = dp->dp_scan; | |
1719 | zil_header_t *zh = zsa->zsa_zh; | |
5dbd68a3 | 1720 | zbookmark_phys_t zb; |
428870ff | 1721 | |
30af21b0 | 1722 | ASSERT(!BP_IS_REDACTED(bp)); |
b0bc7a84 | 1723 | if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) |
428870ff BB |
1724 | return (0); |
1725 | ||
1726 | /* | |
1727 | * One block ("stubby") may have been allocated a long time ago; we |
1728 | * want to visit that one because it has been allocated | |
1729 | * (on-disk) even if it hasn't been claimed (even though for | |
1730 | * scrub there's nothing to do to it). | |
1731 | */ | |
d2734cce | 1732 | if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa)) |
428870ff BB |
1733 | return (0); |
1734 | ||
1735 | SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], | |
1736 | ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); | |
1737 | ||
1738 | VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); | |
1739 | return (0); | |
1740 | } | |
1741 | ||
428870ff | 1742 | static int |
61868bb1 CS |
1743 | dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, |
1744 | uint64_t claim_txg) | |
428870ff | 1745 | { |
14e4e3cb | 1746 | (void) zilog; |
428870ff BB |
1747 | if (lrc->lrc_txtype == TX_WRITE) { |
1748 | zil_scan_arg_t *zsa = arg; | |
1749 | dsl_pool_t *dp = zsa->zsa_dp; | |
1750 | dsl_scan_t *scn = dp->dp_scan; | |
1751 | zil_header_t *zh = zsa->zsa_zh; | |
61868bb1 CS |
1752 | const lr_write_t *lr = (const lr_write_t *)lrc; |
1753 | const blkptr_t *bp = &lr->lr_blkptr; | |
5dbd68a3 | 1754 | zbookmark_phys_t zb; |
428870ff | 1755 | |
30af21b0 | 1756 | ASSERT(!BP_IS_REDACTED(bp)); |
b0bc7a84 MG |
1757 | if (BP_IS_HOLE(bp) || |
1758 | bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) | |
428870ff BB |
1759 | return (0); |
1760 | ||
1761 | /* | |
1762 | * birth can be < claim_txg if this record's txg is | |
1763 | * already txg sync'ed (but this log block contains | |
1764 | * other records that are not synced) | |
1765 | */ | |
1766 | if (claim_txg == 0 || bp->blk_birth < claim_txg) | |
1767 | return (0); | |
1768 | ||
a6ccb36b | 1769 | ASSERT3U(BP_GET_LSIZE(bp), !=, 0); |
428870ff BB |
1770 | SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], |
1771 | lr->lr_foid, ZB_ZIL_LEVEL, | |
1772 | lr->lr_offset / BP_GET_LSIZE(bp)); | |
1773 | ||
1774 | VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); | |
1775 | } | |
1776 | return (0); | |
1777 | } | |
1778 | ||
1779 | static void | |
1780 | dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh) | |
1781 | { | |
1782 | uint64_t claim_txg = zh->zh_claim_txg; | |
1783 | zil_scan_arg_t zsa = { dp, zh }; | |
1784 | zilog_t *zilog; | |
1785 | ||
d2734cce SD |
1786 | ASSERT(spa_writeable(dp->dp_spa)); |
1787 | ||
428870ff BB |
1788 | /* |
1789 | * We only want to visit blocks that have been claimed but not yet | |
1790 | * replayed (or, in read-only mode, blocks that *would* be claimed). | |
1791 | */ | |
d2734cce | 1792 | if (claim_txg == 0) |
428870ff BB |
1793 | return; |
1794 | ||
1795 | zilog = zil_alloc(dp->dp_meta_objset, zh); | |
1796 | ||
1797 | (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa, | |
b5256303 | 1798 | claim_txg, B_FALSE); |
428870ff BB |
1799 | |
1800 | zil_free(zilog); | |
1801 | } | |
1802 | ||
d4a72f23 TC |
1803 | /* |
1804 | * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea | |
1805 | * here is to sort the AVL tree by the order each block will be needed. | |
1806 | */ | |
1807 | static int | |
1808 | scan_prefetch_queue_compare(const void *a, const void *b) | |
428870ff | 1809 | { |
d4a72f23 TC |
1810 | const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b; |
1811 | const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc; | |
1812 | const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc; | |
428870ff | 1813 | |
d4a72f23 TC |
1814 | return (zbookmark_compare(spc_a->spc_datablkszsec, |
1815 | spc_a->spc_indblkshift, spc_b->spc_datablkszsec, | |
1816 | spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb)); | |
1817 | } | |
428870ff | 1818 | |
d4a72f23 | 1819 | static void |
dd66857d | 1820 | scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag) |
d4a72f23 | 1821 | { |
424fd7c3 TS |
1822 | if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) { |
1823 | zfs_refcount_destroy(&spc->spc_refcnt); | |
d4a72f23 TC |
1824 | kmem_free(spc, sizeof (scan_prefetch_ctx_t)); |
1825 | } | |
1826 | } | |
1827 | ||
1828 | static scan_prefetch_ctx_t * | |
dd66857d | 1829 | scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag) |
d4a72f23 TC |
1830 | { |
1831 | scan_prefetch_ctx_t *spc; | |
1832 | ||
1833 | spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP); | |
424fd7c3 | 1834 | zfs_refcount_create(&spc->spc_refcnt); |
c13060e4 | 1835 | zfs_refcount_add(&spc->spc_refcnt, tag); |
d4a72f23 TC |
1836 | spc->spc_scn = scn; |
1837 | if (dnp != NULL) { | |
1838 | spc->spc_datablkszsec = dnp->dn_datablkszsec; | |
1839 | spc->spc_indblkshift = dnp->dn_indblkshift; | |
1840 | spc->spc_root = B_FALSE; | |
1841 | } else { | |
1842 | spc->spc_datablkszsec = 0; | |
1843 | spc->spc_indblkshift = 0; | |
1844 | spc->spc_root = B_TRUE; | |
1845 | } | |
1846 | ||
1847 | return (spc); | |
1848 | } | |
1849 | ||
1850 | static void | |
dd66857d | 1851 | scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag) |
d4a72f23 | 1852 | { |
c13060e4 | 1853 | zfs_refcount_add(&spc->spc_refcnt, tag); |
d4a72f23 TC |
1854 | } |
1855 | ||
d6496040 TC |
1856 | static void |
1857 | scan_ds_prefetch_queue_clear(dsl_scan_t *scn) | |
1858 | { | |
1859 | spa_t *spa = scn->scn_dp->dp_spa; | |
1860 | void *cookie = NULL; | |
1861 | scan_prefetch_issue_ctx_t *spic = NULL; | |
1862 | ||
1863 | mutex_enter(&spa->spa_scrub_lock); | |
1864 | while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue, | |
1865 | &cookie)) != NULL) { | |
1866 | scan_prefetch_ctx_rele(spic->spic_spc, scn); | |
1867 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1868 | } | |
1869 | mutex_exit(&spa->spa_scrub_lock); | |
1870 | } | |
1871 | ||
d4a72f23 TC |
1872 | static boolean_t |
1873 | dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc, | |
1874 | const zbookmark_phys_t *zb) | |
1875 | { | |
1876 | zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark; | |
1877 | dnode_phys_t tmp_dnp; | |
1878 | dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp; | |
1879 | ||
1880 | if (zb->zb_objset != last_zb->zb_objset) | |
1881 | return (B_TRUE); | |
1882 | if ((int64_t)zb->zb_object < 0) | |
1883 | return (B_FALSE); | |
1884 | ||
1885 | tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec; | |
1886 | tmp_dnp.dn_indblkshift = spc->spc_indblkshift; | |
1887 | ||
1888 | if (zbookmark_subtree_completed(dnp, zb, last_zb)) | |
1889 | return (B_TRUE); | |
1890 | ||
1891 | return (B_FALSE); | |
1892 | } | |
1893 | ||
1894 | static void | |
1895 | dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) | |
1896 | { | |
1897 | avl_index_t idx; | |
1898 | dsl_scan_t *scn = spc->spc_scn; | |
1899 | spa_t *spa = scn->scn_dp->dp_spa; | |
1900 | scan_prefetch_issue_ctx_t *spic; | |
1901 | ||
30af21b0 | 1902 | if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp)) |
d4a72f23 TC |
1903 | return; |
1904 | ||
1905 | if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg || | |
1906 | (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && | |
1907 | BP_GET_TYPE(bp) != DMU_OT_OBJSET)) | |
1908 | return; | |
1909 | ||
1910 | if (dsl_scan_check_prefetch_resume(spc, zb)) | |
1911 | return; | |
1912 | ||
1913 | scan_prefetch_ctx_add_ref(spc, scn); | |
1914 | spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP); | |
1915 | spic->spic_spc = spc; | |
1916 | spic->spic_bp = *bp; | |
1917 | spic->spic_zb = *zb; | |
1918 | ||
1919 | /* | |
1920 | * Add the IO to the queue of blocks to prefetch. This allows us to | |
1921 | * prioritize blocks that we will need first for the main traversal | |
1922 | * thread. | |
1923 | */ | |
1924 | mutex_enter(&spa->spa_scrub_lock); | |
1925 | if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) { | |
1926 | /* this block is already queued for prefetch */ | |
1927 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1928 | scan_prefetch_ctx_rele(spc, scn); | |
1929 | mutex_exit(&spa->spa_scrub_lock); | |
1930 | return; | |
1931 | } | |
1932 | ||
1933 | avl_insert(&scn->scn_prefetch_queue, spic, idx); | |
1934 | cv_broadcast(&spa->spa_scrub_io_cv); | |
1935 | mutex_exit(&spa->spa_scrub_lock); | |
1936 | } | |
1937 | ||
1938 | static void | |
1939 | dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp, | |
1940 | uint64_t objset, uint64_t object) | |
1941 | { | |
1942 | int i; | |
1943 | zbookmark_phys_t zb; | |
1944 | scan_prefetch_ctx_t *spc; | |
1945 | ||
1946 | if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
1947 | return; | |
1948 | ||
1949 | SET_BOOKMARK(&zb, objset, object, 0, 0); | |
1950 | ||
1951 | spc = scan_prefetch_ctx_create(scn, dnp, FTAG); | |
1952 | ||
1953 | for (i = 0; i < dnp->dn_nblkptr; i++) { | |
1954 | zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]); | |
1955 | zb.zb_blkid = i; | |
1956 | dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb); | |
1957 | } | |
1958 | ||
1959 | if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { | |
1960 | zb.zb_level = 0; | |
1961 | zb.zb_blkid = DMU_SPILL_BLKID; | |
1962 | dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb); | |
1963 | } | |
1964 | ||
1965 | scan_prefetch_ctx_rele(spc, FTAG); | |
1966 | } | |
1967 | ||
65c7cc49 | 1968 | static void |
d4a72f23 TC |
1969 | dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, |
1970 | arc_buf_t *buf, void *private) | |
1971 | { | |
14e4e3cb | 1972 | (void) zio; |
d4a72f23 TC |
1973 | scan_prefetch_ctx_t *spc = private; |
1974 | dsl_scan_t *scn = spc->spc_scn; | |
1975 | spa_t *spa = scn->scn_dp->dp_spa; | |
1976 | ||
13a2ff27 | 1977 | /* broadcast that the IO has completed for rate limiting purposes */ |
d4a72f23 TC |
1978 | mutex_enter(&spa->spa_scrub_lock); |
1979 | ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); | |
1980 | spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); | |
1981 | cv_broadcast(&spa->spa_scrub_io_cv); | |
1982 | mutex_exit(&spa->spa_scrub_lock); | |
1983 | ||
1984 | /* if there was an error or we are done prefetching, just cleanup */ | |
13a2ff27 | 1985 | if (buf == NULL || scn->scn_prefetch_stop) |
d4a72f23 TC |
1986 | goto out; |
1987 | ||
1988 | if (BP_GET_LEVEL(bp) > 0) { | |
1989 | int i; | |
1990 | blkptr_t *cbp; | |
1991 | int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; | |
1992 | zbookmark_phys_t czb; | |
1993 | ||
1994 | for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { | |
1995 | SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, | |
1996 | zb->zb_level - 1, zb->zb_blkid * epb + i); | |
1997 | dsl_scan_prefetch(spc, cbp, &czb); | |
1998 | } | |
1999 | } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { | |
2000 | dnode_phys_t *cdnp; | |
2001 | int i; | |
2002 | int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; | |
2003 | ||
2004 | for (i = 0, cdnp = buf->b_data; i < epb; | |
2005 | i += cdnp->dn_extra_slots + 1, | |
2006 | cdnp += cdnp->dn_extra_slots + 1) { | |
2007 | dsl_scan_prefetch_dnode(scn, cdnp, | |
2008 | zb->zb_objset, zb->zb_blkid * epb + i); | |
2009 | } | |
2010 | } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { | |
2011 | objset_phys_t *osp = buf->b_data; | |
2012 | ||
2013 | dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode, | |
2014 | zb->zb_objset, DMU_META_DNODE_OBJECT); | |
2015 | ||
2016 | if (OBJSET_BUF_HAS_USERUSED(buf)) { | |
2017 | dsl_scan_prefetch_dnode(scn, | |
2018 | &osp->os_groupused_dnode, zb->zb_objset, | |
2019 | DMU_GROUPUSED_OBJECT); | |
2020 | dsl_scan_prefetch_dnode(scn, | |
2021 | &osp->os_userused_dnode, zb->zb_objset, | |
2022 | DMU_USERUSED_OBJECT); | |
2023 | } | |
2024 | } | |
2025 | ||
2026 | out: | |
2027 | if (buf != NULL) | |
2028 | arc_buf_destroy(buf, private); | |
2029 | scan_prefetch_ctx_rele(spc, scn); | |
2030 | } | |
2031 | ||
d4a72f23 TC |
2032 | static void |
2033 | dsl_scan_prefetch_thread(void *arg) | |
2034 | { | |
2035 | dsl_scan_t *scn = arg; | |
2036 | spa_t *spa = scn->scn_dp->dp_spa; | |
2037 | scan_prefetch_issue_ctx_t *spic; | |
2038 | ||
2039 | /* loop until we are told to stop */ | |
2040 | while (!scn->scn_prefetch_stop) { | |
2041 | arc_flags_t flags = ARC_FLAG_NOWAIT | | |
2042 | ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH; | |
2043 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; | |
2044 | ||
2045 | mutex_enter(&spa->spa_scrub_lock); | |
2046 | ||
2047 | /* | |
2048 | * Wait until we have an IO to issue and are not above our | |
2049 | * maximum in flight limit. | |
2050 | */ | |
2051 | while (!scn->scn_prefetch_stop && | |
2052 | (avl_numnodes(&scn->scn_prefetch_queue) == 0 || | |
2053 | spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) { | |
2054 | cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); | |
2055 | } | |
2056 | ||
2057 | /* recheck if we should stop since we waited for the cv */ | |
2058 | if (scn->scn_prefetch_stop) { | |
2059 | mutex_exit(&spa->spa_scrub_lock); | |
2060 | break; | |
2061 | } | |
2062 | ||
2063 | /* remove the prefetch IO from the tree */ | |
2064 | spic = avl_first(&scn->scn_prefetch_queue); | |
2065 | spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp); | |
2066 | avl_remove(&scn->scn_prefetch_queue, spic); | |
2067 | ||
2068 | mutex_exit(&spa->spa_scrub_lock); | |
2069 | ||
2070 | if (BP_IS_PROTECTED(&spic->spic_bp)) { | |
2071 | ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE || | |
2072 | BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET); | |
2073 | ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0); | |
2074 | zio_flags |= ZIO_FLAG_RAW; | |
2075 | } | |
2076 | ||
2077 | /* issue the prefetch asynchronously */ | |
2078 | (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, | |
2079 | &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc, | |
a8b2e306 | 2080 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb); |
428870ff | 2081 | |
d4a72f23 | 2082 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); |
b5256303 TC |
2083 | } |
2084 | ||
d4a72f23 | 2085 | ASSERT(scn->scn_prefetch_stop); |
428870ff | 2086 | |
d4a72f23 TC |
2087 | /* free any prefetches we didn't get to complete */ |
2088 | mutex_enter(&spa->spa_scrub_lock); | |
2089 | while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) { | |
2090 | avl_remove(&scn->scn_prefetch_queue, spic); | |
2091 | scan_prefetch_ctx_rele(spic->spic_spc, scn); | |
2092 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
2093 | } | |
2094 | ASSERT0(avl_numnodes(&scn->scn_prefetch_queue)); | |
2095 | mutex_exit(&spa->spa_scrub_lock); | |
428870ff BB |
2096 | } |
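/*
 * Illustrative sketch (not the verbatim dispatch site): the prefetch
 * thread above is started once per scan pass, typically by dispatching
 * it onto a pool taskq, and is stopped by setting scn_prefetch_stop
 * and waking the CV it sleeps on:
 */
#if 0	/* example only */
	/* start */
	scn->scn_prefetch_stop = B_FALSE;
	(void) taskq_dispatch(dp->dp_sync_taskq,
	    dsl_scan_prefetch_thread, scn, TQ_SLEEP);

	/* stop */
	mutex_enter(&spa->spa_scrub_lock);
	scn->scn_prefetch_stop = B_TRUE;
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
#endif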
2097 | ||
2098 | static boolean_t | |
2099 | dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp, | |
5dbd68a3 | 2100 | const zbookmark_phys_t *zb) |
428870ff BB |
2101 | { |
2102 | /* | |
2103 | * We never skip over user/group accounting objects (obj<0) | |
2104 | */ | |
9ae529ec | 2105 | if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) && |
428870ff BB |
2106 | (int64_t)zb->zb_object >= 0) { |
2107 | /* | |
2108 | * If we already visited this bp & everything below (in | |
2109 | * a prior txg sync), don't bother doing it again. | |
2110 | */ | |
fcff0f35 PD |
2111 | if (zbookmark_subtree_completed(dnp, zb, |
2112 | &scn->scn_phys.scn_bookmark)) | |
428870ff BB |
2113 | return (B_TRUE); |
2114 | ||
2115 | /* | |
2116 | * If we found the block we're trying to resume from, or | |
33dba8c7 AM |
2117 | * we went past it, zero it out to indicate that it's OK |
2118 | * to start checking for suspending again. | |
428870ff | 2119 | */ |
33dba8c7 AM |
2120 | if (zbookmark_subtree_tbd(dnp, zb, |
2121 | &scn->scn_phys.scn_bookmark)) { | |
428870ff BB |
2122 | dprintf("resuming at %llx/%llx/%llx/%llx\n", |
2123 | (longlong_t)zb->zb_objset, | |
2124 | (longlong_t)zb->zb_object, | |
2125 | (longlong_t)zb->zb_level, | |
2126 | (longlong_t)zb->zb_blkid); | |
861166b0 | 2127 | memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb)); |
428870ff BB |
2128 | } |
2129 | } | |
2130 | return (B_FALSE); | |
2131 | } | |
2132 | ||
d4a72f23 TC |
2133 | static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, |
2134 | dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, | |
2135 | dmu_objset_type_t ostype, dmu_tx_t *tx); | |
2136 | inline __attribute__((always_inline)) static void dsl_scan_visitdnode( | |
2137 | dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype, | |
2138 | dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx); | |
2139 | ||
428870ff BB |
2140 | /* |
2141 | * Return nonzero on i/o error. | |
2143 | */ | |
10be533e | 2144 | inline __attribute__((always_inline)) static int |
428870ff BB |
2145 | dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, |
2146 | dnode_phys_t *dnp, const blkptr_t *bp, | |
ebcf4936 | 2147 | const zbookmark_phys_t *zb, dmu_tx_t *tx) |
428870ff BB |
2148 | { |
2149 | dsl_pool_t *dp = scn->scn_dp; | |
2cd0f98f | 2150 | spa_t *spa = dp->dp_spa; |
572e2857 | 2151 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; |
428870ff BB |
2152 | int err; |
2153 | ||
30af21b0 PD |
2154 | ASSERT(!BP_IS_REDACTED(bp)); |
2155 | ||
f3b08dfd GA |
2156 | /* |
2157 | * There is an unlikely case of encountering dnodes with contradicting | |
2158 | * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag before in files created | |
2159 | * or modified before commit 4254acb was merged. As it is not possible | |
2160 | * to know which of the two is correct, report an error. | |
2161 | */ | |
2162 | if (dnp != NULL && | |
2163 | dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) { | |
2164 | scn->scn_phys.scn_errors++; | |
431083f7 | 2165 | spa_log_error(spa, zb, &bp->blk_birth); |
f3b08dfd GA |
2166 | return (SET_ERROR(EINVAL)); |
2167 | } | |
2168 | ||
428870ff | 2169 | if (BP_GET_LEVEL(bp) > 0) { |
2a432414 | 2170 | arc_flags_t flags = ARC_FLAG_WAIT; |
428870ff BB |
2171 | int i; |
2172 | blkptr_t *cbp; | |
2173 | int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; | |
ebcf4936 | 2174 | arc_buf_t *buf; |
428870ff | 2175 | |
2cd0f98f | 2176 | err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, |
a8b2e306 | 2177 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); |
428870ff BB |
2178 | if (err) { |
2179 | scn->scn_phys.scn_errors++; | |
2180 | return (err); | |
2181 | } | |
ebcf4936 | 2182 | for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { |
5dbd68a3 | 2183 | zbookmark_phys_t czb; |
428870ff BB |
2184 | |
2185 | SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, | |
2186 | zb->zb_level - 1, | |
2187 | zb->zb_blkid * epb + i); | |
2188 | dsl_scan_visitbp(cbp, &czb, dnp, | |
ebcf4936 | 2189 | ds, scn, ostype, tx); |
428870ff | 2190 | } |
d3c2ae1c | 2191 | arc_buf_destroy(buf, &buf); |
428870ff | 2192 | } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { |
2a432414 | 2193 | arc_flags_t flags = ARC_FLAG_WAIT; |
428870ff | 2194 | dnode_phys_t *cdnp; |
d4a72f23 | 2195 | int i; |
428870ff | 2196 | int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; |
ebcf4936 | 2197 | arc_buf_t *buf; |
428870ff | 2198 | |
b5256303 TC |
2199 | if (BP_IS_PROTECTED(bp)) { |
2200 | ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); | |
2201 | zio_flags |= ZIO_FLAG_RAW; | |
2202 | } | |
2203 | ||
2cd0f98f | 2204 | err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, |
a8b2e306 | 2205 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); |
428870ff BB |
2206 | if (err) { |
2207 | scn->scn_phys.scn_errors++; | |
2208 | return (err); | |
2209 | } | |
50c957f7 NB |
2210 | for (i = 0, cdnp = buf->b_data; i < epb; |
2211 | i += cdnp->dn_extra_slots + 1, | |
2212 | cdnp += cdnp->dn_extra_slots + 1) { | |
428870ff | 2213 | dsl_scan_visitdnode(scn, ds, ostype, |
ebcf4936 | 2214 | cdnp, zb->zb_blkid * epb + i, tx); |
428870ff BB |
2215 | } |
2216 | ||
d3c2ae1c | 2217 | arc_buf_destroy(buf, &buf); |
428870ff | 2218 | } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { |
2a432414 | 2219 | arc_flags_t flags = ARC_FLAG_WAIT; |
428870ff | 2220 | objset_phys_t *osp; |
ebcf4936 | 2221 | arc_buf_t *buf; |
428870ff | 2222 | |
2cd0f98f | 2223 | err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, |
a8b2e306 | 2224 | ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); |
428870ff BB |
2225 | if (err) { |
2226 | scn->scn_phys.scn_errors++; | |
2227 | return (err); | |
2228 | } | |
2229 | ||
ebcf4936 | 2230 | osp = buf->b_data; |
428870ff | 2231 | |
428870ff | 2232 | dsl_scan_visitdnode(scn, ds, osp->os_type, |
ebcf4936 | 2233 | &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx); |
428870ff | 2234 | |
ebcf4936 | 2235 | if (OBJSET_BUF_HAS_USERUSED(buf)) { |
428870ff | 2236 | /* |
9c5167d1 | 2237 | * We also always visit user/group/project accounting |
428870ff | 2238 | * objects, and never skip them, even if we are |
d4a72f23 TC |
2239 | * suspending. This is necessary so that the |
2240 | * space deltas from this txg get integrated. | |
428870ff | 2241 | */ |
9c5167d1 NF |
2242 | if (OBJSET_BUF_HAS_PROJECTUSED(buf)) |
2243 | dsl_scan_visitdnode(scn, ds, osp->os_type, | |
2244 | &osp->os_projectused_dnode, | |
2245 | DMU_PROJECTUSED_OBJECT, tx); | |
428870ff | 2246 | dsl_scan_visitdnode(scn, ds, osp->os_type, |
ebcf4936 | 2247 | &osp->os_groupused_dnode, |
428870ff BB |
2248 | DMU_GROUPUSED_OBJECT, tx); |
2249 | dsl_scan_visitdnode(scn, ds, osp->os_type, | |
ebcf4936 | 2250 | &osp->os_userused_dnode, |
428870ff BB |
2251 | DMU_USERUSED_OBJECT, tx); |
2252 | } | |
d3c2ae1c | 2253 | arc_buf_destroy(buf, &buf); |
3095ca91 MA |
2254 | } else if (!zfs_blkptr_verify(spa, bp, |
2255 | BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) { | |
2cd0f98f BB |
2256 | /* |
2257 | * Sanity check the block pointer contents, this is handled | |
2258 | * by arc_read() for the cases above. | |
2259 | */ | |
2260 | scn->scn_phys.scn_errors++; | |
431083f7 | 2261 | spa_log_error(spa, zb, &bp->blk_birth); |
2cd0f98f | 2262 | return (SET_ERROR(EINVAL)); |
428870ff BB |
2263 | } |
2264 | ||
2265 | return (0); | |
2266 | } | |
2267 | ||
10be533e | 2268 | inline __attribute__((always_inline)) static void |
428870ff | 2269 | dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds, |
ebcf4936 | 2270 | dmu_objset_type_t ostype, dnode_phys_t *dnp, |
428870ff BB |
2271 | uint64_t object, dmu_tx_t *tx) |
2272 | { | |
2273 | int j; | |
2274 | ||
2275 | for (j = 0; j < dnp->dn_nblkptr; j++) { | |
5dbd68a3 | 2276 | zbookmark_phys_t czb; |
428870ff BB |
2277 | |
2278 | SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, | |
2279 | dnp->dn_nlevels - 1, j); | |
2280 | dsl_scan_visitbp(&dnp->dn_blkptr[j], | |
ebcf4936 | 2281 | &czb, dnp, ds, scn, ostype, tx); |
428870ff BB |
2282 | } |
2283 | ||
2284 | if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { | |
5dbd68a3 | 2285 | zbookmark_phys_t czb; |
428870ff BB |
2286 | SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, |
2287 | 0, DMU_SPILL_BLKID); | |
50c957f7 | 2288 | dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp), |
ebcf4936 | 2289 | &czb, dnp, ds, scn, ostype, tx); |
428870ff BB |
2290 | } |
2291 | } | |
2292 | ||
2293 | /* | |
2294 | * The arguments are in this order because mdb can only print the | |
2295 | * first 5; we want them to be useful. | |
2296 | */ | |
2297 | static void | |
5dbd68a3 | 2298 | dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, |
ebcf4936 MA |
2299 | dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, |
2300 | dmu_objset_type_t ostype, dmu_tx_t *tx) | |
428870ff BB |
2301 | { |
2302 | dsl_pool_t *dp = scn->scn_dp; | |
d4a72f23 | 2303 | blkptr_t *bp_toread = NULL; |
428870ff | 2304 | |
0ea05c64 | 2305 | if (dsl_scan_check_suspend(scn, zb)) |
d4a72f23 | 2306 | return; |
428870ff BB |
2307 | |
2308 | if (dsl_scan_check_resume(scn, dnp, zb)) | |
d4a72f23 | 2309 | return; |
428870ff BB |
2310 | |
2311 | scn->scn_visited_this_txg++; | |
2312 | ||
d4a72f23 TC |
2313 | if (BP_IS_HOLE(bp)) { |
2314 | scn->scn_holes_this_txg++; | |
2315 | return; | |
2316 | } | |
2317 | ||
30af21b0 PD |
2318 | if (BP_IS_REDACTED(bp)) { |
2319 | ASSERT(dsl_dataset_feature_is_active(ds, | |
2320 | SPA_FEATURE_REDACTED_DATASETS)); | |
2321 | return; | |
2322 | } | |
2323 | ||
34ce4c42 GA |
2324 | /* |
2325 | * Check if this block contradicts any filesystem flags. | |
2326 | */ | |
2327 | spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS; | |
2328 | if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) | |
2329 | ASSERT(dsl_dataset_feature_is_active(ds, f)); | |
2330 | ||
2331 | f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp)); | |
2332 | if (f != SPA_FEATURE_NONE) | |
2333 | ASSERT(dsl_dataset_feature_is_active(ds, f)); | |
2334 | ||
2335 | f = zio_compress_to_feature(BP_GET_COMPRESS(bp)); | |
2336 | if (f != SPA_FEATURE_NONE) | |
2337 | ASSERT(dsl_dataset_feature_is_active(ds, f)); | |
2338 | ||
2339 | if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) { | |
2340 | scn->scn_lt_min_this_txg++; | |
2341 | return; | |
2342 | } | |
2343 | ||
d4a72f23 TC |
2344 | bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); |
2345 | *bp_toread = *bp; | |
428870ff | 2346 | |
ebcf4936 | 2347 | if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0) |
161ce7ce | 2348 | goto out; |
428870ff BB |
2349 | |
2350 | /* | |
4e33ba4c | 2351 | * If dsl_scan_ddt() has already visited this block, it will have |
428870ff BB |
2352 | * already done any translations or scrubbing, so don't call the |
2353 | * callback again. | |
2354 | */ | |
2355 | if (ddt_class_contains(dp->dp_spa, | |
2356 | scn->scn_phys.scn_ddt_class_max, bp)) { | |
d4a72f23 | 2357 | scn->scn_ddt_contained_this_txg++; |
161ce7ce | 2358 | goto out; |
428870ff BB |
2359 | } |
2360 | ||
2361 | /* | |
2362 | * If this block is from the future (after cur_max_txg), then we | |
2363 | * are doing this on behalf of a deleted snapshot, and we will | |
2364 | * revisit the future block on the next pass of this dataset. | |
2365 | * Don't scan it now unless we need to because something | |
2366 | * under it was modified. | |
2367 | */ | |
d4a72f23 TC |
2368 | if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { |
2369 | scn->scn_gt_max_this_txg++; | |
2370 | goto out; | |
428870ff | 2371 | } |
d4a72f23 TC |
2372 | |
2373 | scan_funcs[scn->scn_phys.scn_func](dp, bp, zb); | |
2374 | ||
161ce7ce | 2375 | out: |
d1d7e268 | 2376 | kmem_free(bp_toread, sizeof (blkptr_t)); |
428870ff BB |
2377 | } |
2378 | ||
2379 | static void | |
2380 | dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp, | |
2381 | dmu_tx_t *tx) | |
2382 | { | |
5dbd68a3 | 2383 | zbookmark_phys_t zb; |
d4a72f23 | 2384 | scan_prefetch_ctx_t *spc; |
428870ff BB |
2385 | |
2386 | SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, | |
2387 | ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); | |
d4a72f23 TC |
2388 | |
2389 | if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) { | |
2390 | SET_BOOKMARK(&scn->scn_prefetch_bookmark, | |
2391 | zb.zb_objset, 0, 0, 0); | |
2392 | } else { | |
2393 | scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark; | |
2394 | } | |
2395 | ||
2396 | scn->scn_objsets_visited_this_txg++; | |
2397 | ||
2398 | spc = scan_prefetch_ctx_create(scn, NULL, FTAG); | |
2399 | dsl_scan_prefetch(spc, bp, &zb); | |
2400 | scan_prefetch_ctx_rele(spc, FTAG); | |
2401 | ||
2402 | dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx); | |
428870ff BB |
2403 | |
2404 | dprintf_ds(ds, "finished scan%s", ""); | |
2405 | } | |
2406 | ||
d4a72f23 TC |
2407 | static void |
2408 | ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys) | |
428870ff | 2409 | { |
d4a72f23 | 2410 | if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) { |
0c66c32d | 2411 | if (ds->ds_is_snapshot) { |
b77222c8 MA |
2412 | /* |
2413 | * Note: | |
2414 | * - scn_cur_{min,max}_txg stays the same. | |
2415 | * - Setting the flag is not really necessary if | |
2416 | * scn_cur_max_txg == scn_max_txg, because there | |
2417 | * is nothing after this snapshot that we care | |
2418 | * about. However, we set it anyway and then | |
2419 | * ignore it when we retraverse it in | |
2420 | * dsl_scan_visitds(). | |
2421 | */ | |
d4a72f23 | 2422 | scn_phys->scn_bookmark.zb_objset = |
d683ddbb | 2423 | dsl_dataset_phys(ds)->ds_next_snap_obj; |
6f57f1e3 RE |
2424 | zfs_dbgmsg("destroying ds %llu on %s; currently " |
2425 | "traversing; reset zb_objset to %llu", | |
428870ff | 2426 | (u_longlong_t)ds->ds_object, |
6f57f1e3 | 2427 | ds->ds_dir->dd_pool->dp_spa->spa_name, |
d683ddbb JG |
2428 | (u_longlong_t)dsl_dataset_phys(ds)-> |
2429 | ds_next_snap_obj); | |
d4a72f23 | 2430 | scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN; |
428870ff | 2431 | } else { |
d4a72f23 | 2432 | SET_BOOKMARK(&scn_phys->scn_bookmark, |
428870ff | 2433 | ZB_DESTROYED_OBJSET, 0, 0, 0); |
6f57f1e3 RE |
2434 | zfs_dbgmsg("destroying ds %llu on %s; currently " |
2435 | "traversing; reset bookmark to -1,0,0,0", | |
2436 | (u_longlong_t)ds->ds_object, | |
2437 | ds->ds_dir->dd_pool->dp_spa->spa_name); | |
428870ff | 2438 | } |
d4a72f23 TC |
2439 | } |
2440 | } | |
2441 | ||
2442 | /* | |
2443 | * Invoked when a dataset is destroyed. We need to make sure that: | |
2444 | * | |
2445 | * 1) If it is the dataset currently being scanned, we write |
2446 | * a new dsl_scan_phys_t and mark the objset reference in it |
2447 | * as destroyed. |
2448 | * 2) Remove it from the work queue, if it was present. | |
2449 | * | |
2450 | * If the dataset was actually a snapshot, instead of marking the dataset | |
2451 | * as destroyed, we instead substitute the next snapshot in line. | |
2452 | */ | |
2453 | void | |
2454 | dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx) | |
2455 | { | |
2456 | dsl_pool_t *dp = ds->ds_dir->dd_pool; | |
2457 | dsl_scan_t *scn = dp->dp_scan; | |
2458 | uint64_t mintxg; | |
2459 | ||
2460 | if (!dsl_scan_is_running(scn)) | |
2461 | return; | |
2462 | ||
2463 | ds_destroyed_scn_phys(ds, &scn->scn_phys); | |
2464 | ds_destroyed_scn_phys(ds, &scn->scn_phys_cached); | |
2465 | ||
2466 | if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { | |
2467 | scan_ds_queue_remove(scn, ds->ds_object); | |
2468 | if (ds->ds_is_snapshot) | |
2469 | scan_ds_queue_insert(scn, | |
2470 | dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg); | |
2471 | } | |
2472 | ||
2473 | if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, | |
2474 | ds->ds_object, &mintxg) == 0) { | |
d683ddbb | 2475 | ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1); |
428870ff BB |
2476 | VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, |
2477 | scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); | |
0c66c32d | 2478 | if (ds->ds_is_snapshot) { |
428870ff BB |
2479 | /* |
2480 | * We keep the same mintxg; it could be > | |
2481 | * ds_creation_txg if the previous snapshot was | |
2482 | * deleted too. | |
2483 | */ | |
2484 | VERIFY(zap_add_int_key(dp->dp_meta_objset, | |
2485 | scn->scn_phys.scn_queue_obj, | |
d683ddbb JG |
2486 | dsl_dataset_phys(ds)->ds_next_snap_obj, |
2487 | mintxg, tx) == 0); | |
6f57f1e3 | 2488 | zfs_dbgmsg("destroying ds %llu on %s; in queue; " |
428870ff BB |
2489 | "replacing with %llu", |
2490 | (u_longlong_t)ds->ds_object, | |
6f57f1e3 | 2491 | dp->dp_spa->spa_name, |
d683ddbb JG |
2492 | (u_longlong_t)dsl_dataset_phys(ds)-> |
2493 | ds_next_snap_obj); | |
428870ff | 2494 | } else { |
6f57f1e3 RE |
2495 | zfs_dbgmsg("destroying ds %llu on %s; in queue; " |
2496 | "removing", | |
2497 | (u_longlong_t)ds->ds_object, | |
2498 | dp->dp_spa->spa_name); | |
428870ff | 2499 | } |
428870ff BB |
2500 | } |
2501 | ||
2502 | /* | |
2503 | * dsl_scan_sync() should be called after this, and should sync | |
2504 | * out our changed state, but just to be safe, do it here. | |
2505 | */ | |
d4a72f23 TC |
2506 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); |
2507 | } | |
2508 | ||
2509 | static void | |
2510 | ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark) | |
2511 | { | |
2512 | if (scn_bookmark->zb_objset == ds->ds_object) { | |
2513 | scn_bookmark->zb_objset = | |
2514 | dsl_dataset_phys(ds)->ds_prev_snap_obj; | |
6f57f1e3 | 2515 | zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; " |
d4a72f23 TC |
2516 | "reset zb_objset to %llu", |
2517 | (u_longlong_t)ds->ds_object, | |
6f57f1e3 | 2518 | ds->ds_dir->dd_pool->dp_spa->spa_name, |
d4a72f23 TC |
2519 | (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); |
2520 | } | |
428870ff BB |
2521 | } |
2522 | ||
d4a72f23 TC |
2523 | /* |
2524 | * Called when a dataset is snapshotted. If we were currently traversing | |
2525 | * this snapshot, we reset our bookmark to point at the newly created | |
2526 | * snapshot. We also modify our work queue to remove the old snapshot and | |
2527 | * replace with the new one. | |
2528 | */ | |
428870ff BB |
2529 | void |
2530 | dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) | |
2531 | { | |
2532 | dsl_pool_t *dp = ds->ds_dir->dd_pool; | |
2533 | dsl_scan_t *scn = dp->dp_scan; | |
2534 | uint64_t mintxg; | |
2535 | ||
d4a72f23 | 2536 | if (!dsl_scan_is_running(scn)) |
428870ff BB |
2537 | return; |
2538 | ||
d683ddbb | 2539 | ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); |
428870ff | 2540 | |
d4a72f23 TC |
2541 | ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); |
2542 | ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); | |
2543 | ||
2544 | if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { | |
2545 | scan_ds_queue_remove(scn, ds->ds_object); | |
2546 | scan_ds_queue_insert(scn, | |
2547 | dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); | |
2548 | } | |
2549 | ||
2550 | if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, | |
2551 | ds->ds_object, &mintxg) == 0) { | |
428870ff BB |
2552 | VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, |
2553 | scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); | |
2554 | VERIFY(zap_add_int_key(dp->dp_meta_objset, | |
2555 | scn->scn_phys.scn_queue_obj, | |
d683ddbb | 2556 | dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); |
6f57f1e3 | 2557 | zfs_dbgmsg("snapshotting ds %llu on %s; in queue; " |
428870ff BB |
2558 | "replacing with %llu", |
2559 | (u_longlong_t)ds->ds_object, | |
6f57f1e3 | 2560 | dp->dp_spa->spa_name, |
d683ddbb | 2561 | (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); |
428870ff | 2562 | } |
d4a72f23 TC |
2563 | |
2564 | dsl_scan_sync_state(scn, tx, SYNC_CACHED); | |
428870ff BB |
2565 | } |
2566 | ||
d4a72f23 TC |
2567 | static void |
2568 | ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, | |
2569 | zbookmark_phys_t *scn_bookmark) | |
428870ff | 2570 | { |
d4a72f23 TC |
2571 | if (scn_bookmark->zb_objset == ds1->ds_object) { |
2572 | scn_bookmark->zb_objset = ds2->ds_object; | |
6f57f1e3 | 2573 | zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; " |
428870ff BB |
2574 | "reset zb_objset to %llu", |
2575 | (u_longlong_t)ds1->ds_object, | |
6f57f1e3 | 2576 | ds1->ds_dir->dd_pool->dp_spa->spa_name, |
428870ff | 2577 | (u_longlong_t)ds2->ds_object); |
d4a72f23 TC |
2578 | } else if (scn_bookmark->zb_objset == ds2->ds_object) { |
2579 | scn_bookmark->zb_objset = ds1->ds_object; | |
6f57f1e3 | 2580 | zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; " |
428870ff BB |
2581 | "reset zb_objset to %llu", |
2582 | (u_longlong_t)ds2->ds_object, | |
6f57f1e3 | 2583 | ds2->ds_dir->dd_pool->dp_spa->spa_name, |
428870ff BB |
2584 | (u_longlong_t)ds1->ds_object); |
2585 | } | |
d4a72f23 TC |
2586 | } |
2587 | ||
2588 | /* | |
dd262c96 | 2589 | * Called when an origin dataset and its clone are swapped. If we were |
d4a72f23 | 2590 | * currently traversing the dataset, we need to switch to traversing the |
dd262c96 | 2591 | * newly promoted clone. |
d4a72f23 TC |
2592 | */ |
2593 | void | |
2594 | dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) | |
2595 | { | |
2596 | dsl_pool_t *dp = ds1->ds_dir->dd_pool; | |
2597 | dsl_scan_t *scn = dp->dp_scan; | |
dd262c96 AG |
2598 | uint64_t mintxg1, mintxg2; |
2599 | boolean_t ds1_queued, ds2_queued; | |
d4a72f23 TC |
2600 | |
2601 | if (!dsl_scan_is_running(scn)) | |
2602 | return; | |
2603 | ||
2604 | ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); | |
2605 | ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); | |
2606 | ||
dd262c96 AG |
2607 | /* |
2608 | * Handle the in-memory scan queue. | |
2609 | */ | |
	ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
	ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * The swapping code below would not handle this case correctly,
		 * since we can't insert ds2 if it is already there. That's
		 * because scan_ds_queue_insert() prohibits a duplicate insert
		 * and panics.
		 */
	} else if (ds1_queued) {
		scan_ds_queue_remove(scn, ds1->ds_object);
		scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
	} else if (ds2_queued) {
		scan_ds_queue_remove(scn, ds2->ds_object);
		scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
	}

	/*
	 * Handle the on-disk scan queue.
	 * The on-disk state is an out-of-date version of the in-memory state,
	 * so the in-memory and on-disk values for ds1_queued and ds2_queued may
	 * be different. Therefore we need to apply the swap logic to the
	 * on-disk state independently of the in-memory state.
	 */
	ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
	ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * Alternatively, we could check for EEXIST from
		 * zap_add_int_key() and back out to the original state, but
		 * that would be more work than checking for this case upfront.
		 */
	} else if (ds1_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (ds2_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

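/*
 * Illustrative sketch (standalone userland model, not part of the ZFS
 * build): the clone-swap reconciliation above reduces to four cases
 * depending on which of the two datasets is currently queued, and the
 * same case analysis is applied twice, once to the in-memory AVL queue
 * and once to the on-disk ZAP. The toy queue type below is hypothetical;
 * only the case analysis mirrors the code.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct toy_queue { uint64_t obj; uint64_t mintxg; bool queued; };

/* Apply the swap logic for one queue representation. */
static void
swap_queued(struct toy_queue *q1, struct toy_queue *q2)
{
	if (q1->queued && q2->queued) {
		/* Both queued: nothing to do (no duplicate inserts). */
	} else if (q1->queued) {
		/* Only ds1 queued: requeue its entry under ds2. */
		q2->queued = true;
		q2->mintxg = q1->mintxg;
		q1->queued = false;
	} else if (q2->queued) {
		q1->queued = true;
		q1->mintxg = q2->mintxg;
		q2->queued = false;
	}
}

int
main(void)
{
	struct toy_queue ds1 = { 100, 50, true };
	struct toy_queue ds2 = { 200, 0, false };
	swap_queued(&ds1, &ds2);
	printf("ds1 queued=%d ds2 queued=%d mintxg=%llu\n",
	    ds1.queued, ds2.queued, (unsigned long long)ds2.mintxg);
	return (0);
}
#endif
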
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	uint64_t originobj = *(uint64_t *)arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *   birth < our ds_creation_txg
		 *     cur_min_txg is no less than ds_creation_txg.
		 *     We have already visited these blocks.
		 * or
		 *   birth > scn_max_txg
		 *     The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (!dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass on %s; visiting again",
		    dp->dp_spa->spa_name);
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    (void) zap_cursor_advance(&zc)) {
				scan_ds_queue_insert(scn,
				    zfs_strtonum(za.za_name, NULL),
				    dsl_dataset_phys(ds)->ds_creation_txg);
			}
			zap_cursor_fini(&zc);
		} else {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &ds->ds_object,
			    DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

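/*
 * Illustrative sketch (standalone model): the skip test at the top of
 * dsl_scan_visitds() just asks whether the dataset's effective scan
 * window [cur_min_txg, max_txg] is empty; an empty window means every
 * block is either already visited (birth < cur_min_txg) or out of scope
 * (birth > max_txg). The txg numbers below are made up.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdint.h>

static int
window_is_empty(uint64_t cur_min_txg, uint64_t max_txg)
{
	return (cur_min_txg >= max_txg);
}

int
main(void)
{
	/* Snapshot created at txg 900 while scanning up to txg 800. */
	printf("skip=%d\n", window_is_empty(900, 800));	/* skip=1 */
	printf("skip=%d\n", window_is_empty(100, 800));	/* skip=0 */
	return (0);
}
#endif
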
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	(void) arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	(void) tx;
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (!dsl_scan_is_running(scn))
		return;

	/*
	 * This function is special because it is the only thing
	 * that can add scan_io_t's to the vdev scan queues from
	 * outside dsl_scan_sync(). For the most part this is ok
	 * as long as it is called from within syncing context.
	 * However, dsl_scan_sync() expects that no new sio's will
	 * be added between when all the work for a scan is done
	 * and the next txg when the scan is actually marked as
	 * completed. This check ensures we do not issue new sio's
	 * during this period.
	 */
	if (scn->scn_done_txg != 0)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = {{{{0}}}};
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
	    "suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

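/*
 * Illustrative sketch (standalone model) of the two-phase policy in the
 * comment above: blocks with refcnt > 1 are handled once during the DDT
 * walk, so the later top-down traversal only needs to issue blocks with
 * refcnt == 1. The refcount array below is hypothetical.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>

int
main(void)
{
	int refcnt[] = { 1, 3, 1, 2, 1 };
	int n = sizeof (refcnt) / sizeof (refcnt[0]);
	int scrubbed = 0;

	/* Phase 1: DDT walk visits each deduped block exactly once. */
	for (int i = 0; i < n; i++)
		if (refcnt[i] > 1)
			scrubbed++;

	/* Phase 2: top-down walk visits only unique blocks. */
	for (int i = 0; i < n; i++)
		if (refcnt[i] == 1)
			scrubbed++;

	printf("blocks scrubbed: %d (each once)\n", scrubbed);
	return (0);
}
#endif
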
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scan_ds_t *sds;
	dsl_pool_t *dp = scn->scn_dp;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_suspending)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_suspending)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, NULL, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_suspending);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
		/*
		 * If we were suspended, continue from here. Note if the
		 * ds we were suspended on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/*
	 * In case we suspended right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));

	/*
	 * Keep pulling things out of the dataset avl queue. Updates to the
	 * persistent zap-object-as-queue happen only at checkpoints.
	 */
	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
		dsl_dataset_t *ds;
		uint64_t dsobj = sds->sds_dsobj;
		uint64_t txg = sds->sds_txg;

		/* dequeue and free the ds from the queue */
		scan_ds_queue_remove(scn, dsobj);
		sds = NULL;

		/* set up min / max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (txg != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg, txg);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		if (scn->scn_suspending)
			return;
	}

	/* No more objsets to fetch, we're done */
	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
	ASSERT0(scn->scn_suspending);
}

static uint64_t
dsl_scan_count_data_disks(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t i, leaves = 0;

	for (i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];
		if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
			continue;
		leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
	}
	return (leaves);
}

static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
	int i;
	uint64_t cur_size = 0;

	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
	}

	q->q_total_zio_size_this_txg += cur_size;
	q->q_zios_this_txg++;
}

static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
    uint64_t end)
{
	q->q_total_seg_size_this_txg += end - start;
	q->q_segs_this_txg++;
}

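/*
 * Illustrative sketch (standalone model): dsl_scan_count_data_disks()
 * sums, over every eligible top-level vdev, the number of disks that
 * actually hold data (total disks minus parity). The vdev table below
 * is hypothetical.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdint.h>

struct toy_vdev { uint64_t ndisks; uint64_t nparity; int skip; };

int
main(void)
{
	/* e.g. a 6-disk RAIDZ2, an 8-disk RAIDZ2, and a log device. */
	struct toy_vdev vds[] = { { 6, 2, 0 }, { 8, 2, 0 }, { 1, 0, 1 } };
	uint64_t leaves = 0;

	for (int i = 0; i < 3; i++) {
		if (vds[i].skip)	/* log/spare/l2cache */
			continue;
		leaves += vds[i].ndisks - vds[i].nparity;
	}
	printf("data disks: %llu\n", (unsigned long long)leaves);	/* 10 */
	return (0);
}
#endif
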
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
	/* See comment in dsl_scan_check_suspend() */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	uint64_t dirty_min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	return ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

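/*
 * Illustrative sketch (standalone model) of the suspend predicate above:
 * once the queue has run for at least the per-scan-type minimum time, it
 * yields as soon as dirty data crosses
 * zfs_dirty_data_max * zfs_vdev_async_write_active_min_dirty_percent / 100,
 * the txg sync is waiting, or the sync has exceeded zfs_txg_timeout.
 * All tunable values below are hypothetical.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t dirty_data_max = 4ULL << 30;		/* 4 GiB */
	uint64_t min_dirty_percent = 30;
	uint64_t dirty_min_bytes = dirty_data_max * min_dirty_percent / 100;

	uint64_t scan_time_ms = 1500, mintime_ms = 1000;
	uint64_t dirty_total = 2ULL << 30;		/* 2 GiB dirty */

	int suspend = (scan_time_ms > mintime_ms &&
	    dirty_total >= dirty_min_bytes);
	printf("dirty_min=%llu suspend=%d\n",
	    (unsigned long long)dirty_min_bytes, suspend);	/* suspend=1 */
	return (0);
}
#endif
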
/*
 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
 * disk. This consumes the io_list and frees the scan_io_t's. This is
 * called when emptying queues, either when we're up against the memory
 * limit or when we have finished scanning. Returns B_TRUE if we stopped
 * processing the list before we finished. Any sios that were not issued
 * will remain in the io_list.
 */
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	boolean_t suspended = B_FALSE;

	while ((sio = list_head(io_list)) != NULL) {
		blkptr_t bp;

		if (scan_io_queue_check_suspend(scn)) {
			suspended = B_TRUE;
			break;
		}

		sio2bp(sio, &bp);
		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
		    &sio->sio_zb, queue);
		(void) list_remove_head(io_list);
		scan_io_queues_update_zio_stats(queue, &bp);
		sio_free(sio);
	}
	return (suspended);
}

/*
 * This function removes sios from an IO queue which reside within a given
 * range_seg_t and inserts them (in offset order) into a list. Note that
 * we only ever return a maximum of 32 sios at once. If there are more sios
 * to process within this segment that did not make it onto the list we
 * return B_TRUE and otherwise B_FALSE.
 */
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
	scan_io_t *srch_sio, *sio, *next_sio;
	avl_index_t idx;
	uint_t num_sios = 0;
	int64_t bytes_issued = 0;

	ASSERT(rs != NULL);
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	srch_sio = sio_alloc(1);
	srch_sio->sio_nr_dvas = 1;
	SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));

	/*
	 * The exact start of the extent might not contain any matching zios,
	 * so if that's the case, examine the next one in the tree.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio == NULL)
		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);

	while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr) && num_sios <= 32) {
		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
		    queue->q_exts_by_addr));
		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
		    queue->q_exts_by_addr));

		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
		avl_remove(&queue->q_sios_by_addr, sio);
		if (avl_is_empty(&queue->q_sios_by_addr))
			atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		bytes_issued += SIO_GET_ASIZE(sio);
		num_sios++;
		list_insert_tail(list, sio);
		sio = next_sio;
	}

	/*
	 * We limit the number of sios we process at once to 32 to avoid
	 * biting off more than we can chew. If we didn't take everything
	 * in the segment we update it to reflect the work we were able to
	 * complete. Otherwise, we remove it from the range tree entirely.
	 */
	if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr)) {
		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
		    -bytes_issued);
		range_tree_resize_segment(queue->q_exts_by_addr, rs,
		    SIO_GET_OFFSET(sio), rs_get_end(rs,
		    queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
		queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
		return (B_TRUE);
	} else {
		uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
		uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
		range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
		queue->q_last_ext_addr = -1;
		return (B_FALSE);
	}
}

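/*
 * Illustrative sketch (standalone model): scan_io_queue_gather() pulls
 * offset-sorted I/Os that fall inside one extent, stopping after a batch
 * of 32 and reporting whether work remains in the segment. A plain
 * sorted array stands in for the AVL tree here.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdint.h>

/* Returns 1 if I/Os remain in [start, end) after taking one batch. */
static int
gather(const uint64_t *offs, int n, uint64_t start, uint64_t end)
{
	int taken = 0;

	for (int i = 0; i < n && offs[i] < end; i++) {
		if (offs[i] < start)
			continue;
		if (taken == 32)	/* batch limit */
			return (1);
		taken++;
	}
	return (0);
}

int
main(void)
{
	uint64_t offs[40];
	for (int i = 0; i < 40; i++)
		offs[i] = (uint64_t)i * 4096;
	printf("more=%d\n", gather(offs, 40, 0, 40 * 4096));	/* more=1 */
	return (0);
}
#endif
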
/*
 * This is called from the queue emptying thread and selects the next
 * extent from which we are to issue I/Os. The behavior of this function
 * depends on the state of the scan, the current memory consumption and
 * whether or not we are performing a scan shutdown.
 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
 *	needs to perform a checkpoint
 * 2) We select the largest available extent if we are up against the
 *	memory limit.
 * 3) Otherwise we don't select any extents.
 */
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	range_tree_t *rt = queue->q_exts_by_addr;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
	ASSERT(scn->scn_is_sorted);

	if (!scn->scn_checkpointing && !scn->scn_clearing)
		return (NULL);

	/*
	 * During normal clearing, we want to issue our largest segments
	 * first, keeping IO as sequential as possible, and leaving the
	 * smaller extents for later with the hope that they might eventually
	 * grow to larger sequential segments. However, when the scan is
	 * checkpointing, no new extents will be added to the sorting queue,
	 * so the way we are sorted now is as good as it will ever get.
	 * In this case, we instead switch to issuing extents in LBA order.
	 */
	if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
	    zfs_scan_issue_strategy == 1)
		return (range_tree_first(rt));

	/*
	 * Try to continue the previous extent if it is not completed yet.
	 * After the shrink in scan_io_queue_gather() it may no longer be
	 * the best, but otherwise we would leave a shorter remnant every txg.
	 */
	uint64_t start;
	uint64_t size = 1ULL << rt->rt_shift;
	range_seg_t *addr_rs;
	if (queue->q_last_ext_addr != -1) {
		start = queue->q_last_ext_addr;
		addr_rs = range_tree_find(rt, start, size);
		if (addr_rs != NULL)
			return (addr_rs);
	}

	/*
	 * Nothing to continue, so find a new best extent.
	 */
	uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
	if (v == NULL)
		return (NULL);
	queue->q_last_ext_addr = start = *v << rt->rt_shift;

	/*
	 * We need to get the original entry in the by_addr tree so we can
	 * modify it.
	 */
	addr_rs = range_tree_find(rt, start, size);
	ASSERT3P(addr_rs, !=, NULL);
	ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
	ASSERT3U(rs_get_end(addr_rs, rt), >, start);
	return (addr_rs);
}

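/*
 * Illustrative sketch (standalone model) of the selection policy above:
 * LBA order while checkpointing (or when forced by the tunable),
 * otherwise continue the previous extent if it still exists, else take
 * the largest extent. The extent arrays are hypothetical.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdint.h>

struct ext { uint64_t start; uint64_t size; };

static const struct ext *
fetch_ext(const struct ext *e, int n, int checkpointing, int64_t last)
{
	int best = 0;

	if (checkpointing)		/* issue in LBA order */
		return (&e[0]);		/* e[] kept sorted by start */
	for (int i = 0; i < n; i++) {
		if ((int64_t)e[i].start == last)
			return (&e[i]);	/* continue previous extent */
		if (e[i].size > e[best].size)
			best = i;
	}
	return (&e[best]);		/* largest-first otherwise */
}

int
main(void)
{
	struct ext e[] = { { 0, 8192 }, { 65536, 131072 }, { 4194304, 4096 } };
	const struct ext *r = fetch_ext(e, 3, 0, -1);
	printf("picked start=%llu size=%llu\n",
	    (unsigned long long)r->start, (unsigned long long)r->size);
	return (0);
}
#endif
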
static void
scan_io_queues_run_one(void *arg)
{
	dsl_scan_io_queue_t *queue = arg;
	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
	boolean_t suspended = B_FALSE;
	range_seg_t *rs;
	scan_io_t *sio;
	zio_t *zio;
	list_t sio_list;

	ASSERT(queue->q_scn->scn_is_sorted);

	list_create(&sio_list, sizeof (scan_io_t),
	    offsetof(scan_io_t, sio_nodes.sio_list_node));
	zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
	    NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
	mutex_enter(q_lock);
	queue->q_zio = zio;

	/* Calculate maximum in-flight bytes for this vdev. */
	queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
	    (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));

	/* reset per-queue scan statistics for this txg */
	queue->q_total_seg_size_this_txg = 0;
	queue->q_segs_this_txg = 0;
	queue->q_total_zio_size_this_txg = 0;
	queue->q_zios_this_txg = 0;

	/* loop until we run out of time or sios */
	while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
		uint64_t seg_start = 0, seg_end = 0;
		boolean_t more_left;

		ASSERT(list_is_empty(&sio_list));

		/* loop while we still have sios left to process in this rs */
		do {
			scan_io_t *first_sio, *last_sio;

			/*
			 * We have selected which extent needs to be
			 * processed next. Gather up the corresponding sios.
			 */
			more_left = scan_io_queue_gather(queue, rs, &sio_list);
			ASSERT(!list_is_empty(&sio_list));
			first_sio = list_head(&sio_list);
			last_sio = list_tail(&sio_list);

			seg_end = SIO_GET_END_OFFSET(last_sio);
			if (seg_start == 0)
				seg_start = SIO_GET_OFFSET(first_sio);

			/*
			 * Issuing sios can take a long time so drop the
			 * queue lock. The sio queue won't be updated by
			 * other threads since we're in syncing context so
			 * we can be sure that our trees will remain exactly
			 * as we left them.
			 */
			mutex_exit(q_lock);
			suspended = scan_io_queue_issue(queue, &sio_list);
			mutex_enter(q_lock);

			if (suspended)
				break;
		} while (more_left);

		/* update statistics for debugging purposes */
		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);

		if (suspended)
			break;
	}

	/*
	 * If we were suspended in the middle of processing,
	 * requeue any unfinished sios and exit.
	 */
	while ((sio = list_head(&sio_list)) != NULL) {
		list_remove(&sio_list, sio);
		scan_io_queue_insert_impl(queue, sio);
	}

	queue->q_zio = NULL;
	mutex_exit(q_lock);
	zio_nowait(zio);
	list_destroy(&sio_list);
}

/*
 * Performs an emptying run on all scan queues in the pool. This just
 * punches out one thread per top-level vdev, each of which processes
 * only that vdev's scan queue. We can parallelize the I/O here because
 * we know that each queue's I/Os only affect its own top-level vdev.
 *
 * This function waits for the queue runs to complete, and must be
 * called from dsl_scan_sync (or in general, syncing context).
 */
static void
scan_io_queues_run(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(scn->scn_is_sorted);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (scn->scn_queues_pending == 0)
		return;

	if (scn->scn_taskq == NULL) {
		int nthreads = spa->spa_root_vdev->vdev_children;

		/*
		 * We need to make this taskq *always* execute as many
		 * threads in parallel as we have top-level vdevs and no
		 * less, otherwise strange serialization of the calls to
		 * scan_io_queues_run_one can occur during spa_sync runs
		 * and that significantly impacts performance.
		 */
		scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
		    minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];

		mutex_enter(&vd->vdev_scan_io_queue_lock);
		if (vd->vdev_scan_io_queue != NULL) {
			VERIFY(taskq_dispatch(scn->scn_taskq,
			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
			    TQ_SLEEP) != TASKQID_INVALID);
		}
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * Wait for the queues to finish issuing their IOs for this run
	 * before we return. There may still be IOs in flight at this
	 * point.
	 */
	taskq_wait(scn->scn_taskq);
}

static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (zfs_async_block_max_blocks != 0 &&
	    scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
		return (B_TRUE);
	}

	if (zfs_max_async_dedup_frees != 0 &&
	    scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
		return (B_TRUE);
	}

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_async_block_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	if (BP_GET_DEDUP(bp))
		scn->scn_dedup_frees_this_txg++;
	return (0);
}

static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t i;
	uint64_t seg_size_total = 0, zio_size_total = 0;
	uint64_t seg_count_total = 0, zio_count_total = 0;

	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;

		if (queue == NULL)
			continue;

		seg_size_total += queue->q_total_seg_size_this_txg;
		zio_size_total += queue->q_total_zio_size_this_txg;
		seg_count_total += queue->q_segs_this_txg;
		zio_count_total += queue->q_zios_this_txg;
	}

	if (seg_count_total == 0 || zio_count_total == 0) {
		scn->scn_avg_seg_size_this_txg = 0;
		scn->scn_avg_zio_size_this_txg = 0;
		scn->scn_segs_this_txg = 0;
		scn->scn_zios_this_txg = 0;
		return;
	}

	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
	scn->scn_segs_this_txg = seg_count_total;
	scn->scn_zios_this_txg = zio_count_total;
}

static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	return (dsl_scan_free_block_cb(arg, bp, tx));
}

static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	dsl_scan_t *scn = arg;
	const dva_t *dva = &bp->blk_dva[0];

	if (dsl_scan_async_block_should_pause(scn))
		return (SET_ERROR(ERESTART));

	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
	    DVA_GET_ASIZE(dva), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;
	boolean_t clones_left;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	clones_left = spa_livelist_delete_check(spa);
	return ((used != 0) || (clones_left));
}

boolean_t
dsl_errorscrub_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (dsl_errorscrubbing(scn->scn_dp))
		return (B_TRUE);
	return (B_FALSE);
}

static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
	boolean_t need_resilver = B_FALSE;

	for (int c = 0; c < vd->vdev_children; c++) {
		need_resilver |=
		    dsl_scan_check_deferred(vd->vdev_child[c]);
	}

	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
	    !vd->vdev_ops->vdev_op_leaf)
		return (need_resilver);

	if (!vd->vdev_resilver_deferred)
		need_resilver = B_TRUE;

	return (need_resilver);
}

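/*
 * Illustrative sketch (standalone model): dsl_scan_check_deferred()
 * answers "does any leaf under this vdev still want a non-deferred
 * resilver?" by OR-ing the answer over the subtree. The toy tree below
 * is hypothetical.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>

struct toy_vd {
	int nchildren;
	struct toy_vd *child[4];
	int is_leaf;
	int resilver_deferred;
};

static int
check_deferred(const struct toy_vd *vd)
{
	int need = 0;

	for (int c = 0; c < vd->nchildren; c++)
		need |= check_deferred(vd->child[c]);
	if (!vd->is_leaf)
		return (need);
	return (need || !vd->resilver_deferred);
}

int
main(void)
{
	struct toy_vd a = { 0, { 0 }, 1, 1 };	/* leaf, deferred */
	struct toy_vd b = { 0, { 0 }, 1, 0 };	/* leaf, active */
	struct toy_vd top = { 2, { &a, &b }, 0, 0 };
	printf("need_resilver=%d\n", check_deferred(&top));	/* 1 */
	return (0);
}
#endif
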
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	vdev_t *vd;

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops == &vdev_indirect_ops) {
		/*
		 * The indirect vdev can point to multiple
		 * vdevs. For simplicity, always create
		 * the resilver zio_t. zio_vdev_io_start()
		 * will bypass the child resilver i/o's if
		 * they are on vdevs that don't have DTL's.
		 */
		return (B_TRUE);
	}

	if (DVA_GET_GANG(dva)) {
		/*
		 * Gang members may be spread across multiple
		 * vdevs, so the best estimate we have is the
		 * scrub range, which has already been checked.
		 * XXX -- it would be better to change our
		 * allocation policy to ensure that all
		 * gang members reside on the same vdev.
		 */
		return (B_TRUE);
	}

	/*
	 * Check if the top-level vdev must resilver this offset.
	 * When the offset does not intersect with a dirty leaf DTL
	 * then it may be possible to skip the resilver IO. The psize
	 * is provided instead of asize to simplify the check for RAIDZ.
	 */
	if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
		return (B_FALSE);

	/*
	 * Check that this top-level vdev has a device under it which
	 * is resilvering and is not deferred.
	 */
	if (!dsl_scan_check_deferred(vd))
		return (B_FALSE);

	return (B_TRUE);
}

static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	if (spa_suspend_async_destroy(spa))
		return (0);

	if (zfs_free_bpobj_enabled &&
	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    bpobj_dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf. Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree on %s in txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    spa->spa_name, (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;
		scn->scn_dedup_frees_this_txg = 0;

		/*
		 * Write out changes to the DDT and the BRT that may be
		 * required as a result of the blocks freed. This ensures
		 * that the DDT and the BRT are clean when a scrub/resilver
		 * runs.
		 */
		ddt_sync(spa, tx->tx_txg);
		brt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return (err);
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    !spa_livelist_delete_check(spa)) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	spa_notify_waiters(spa);

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}
	return (0);
}

static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}

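/*
 * Illustrative sketch (standalone userland parser): name_to_bookmark()
 * above decodes a ZAP name of the form "objset:object:level:blkid".
 * This version uses strtoull() in place of the kernel's zfs_strtonum();
 * the toy_zb type is hypothetical.
 */
#if 0	/* standalone example; compile separately */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdint.h>

struct toy_zb { uint64_t objset, object, blkid; int level; };

static void
toy_name_to_bookmark(const char *buf, struct toy_zb *zb)
{
	char *p;

	zb->objset = strtoull(buf, &p, 10);
	assert(*p == ':');
	zb->object = strtoull(p + 1, &p, 10);
	assert(*p == ':');
	zb->level = (int)strtoull(p + 1, &p, 10);
	assert(*p == ':');
	zb->blkid = strtoull(p + 1, &p, 10);
	assert(*p == '\0');
}

int
main(void)
{
	struct toy_zb zb;
	toy_name_to_bookmark("54:8:0:1024", &zb);
	printf("%llu %llu %d %llu\n", (unsigned long long)zb.objset,
	    (unsigned long long)zb.object, zb.level,
	    (unsigned long long)zb.blkid);
	return (0);
}
#endif
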
static void
read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;
	if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
		return;

	if (dmu_objset_from_ds(ds, &os) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	/*
	 * If the key is not loaded dbuf_dnode_findbp() will error out with
	 * EACCES. However in that case dnode_hold() will eventually call
	 * dbuf_read()->zio_wait() which may call spa_log_error(). This will
	 * lead to a deadlock due to us holding the mutex spa_errlist_lock.
	 * Avoid this by checking here if the keys are loaded, if not return.
	 * If the keys are not loaded the head_errlog feature is meaningless
	 * as we cannot figure out the birth txg of the block pointer.
	 */
	if (dsl_dataset_get_keystatus(ds->ds_dir) ==
	    ZFS_KEYSTATUS_UNAVAILABLE) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	dnode_t *dn;
	blkptr_t bp;

	if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
	    NULL);

	if (error != 0 || BP_IS_HOLE(&bp)) {
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;

	/* If it's an intent log block, failure is expected. */
	if (zb.zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	ASSERT(!BP_IS_EMBEDDED(&bp));
	scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	dsl_dataset_rele(ds, FTAG);
}

3954 | /* | |
3955 | * We keep track of the scrubbed error blocks in "count". This will be used | |
3956 | * when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This | |
3957 | * function is modelled after check_filesystem(). | |
3958 | */ | |
3959 | static int | |
3960 | scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep, | |
3961 | int *count) | |
3962 | { | |
3963 | dsl_dataset_t *ds; | |
3964 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
3965 | dsl_scan_t *scn = dp->dp_scan; | |
3966 | ||
3967 | int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds); | |
3968 | if (error != 0) | |
3969 | return (error); | |
3970 | ||
3971 | uint64_t latest_txg; | |
3972 | uint64_t txg_to_consider = spa->spa_syncing_txg; | |
3973 | boolean_t check_snapshot = B_TRUE; | |
3974 | ||
3975 | error = find_birth_txg(ds, zep, &latest_txg); | |
3976 | ||
3977 | /* | |
3978 | * If find_birth_txg() errors out, then err on the side of caution and | |
3979 | * proceed. In worst case scenario scrub all objects. If zep->zb_birth | |
3980 | * is 0 (e.g. in case of encryption with unloaded keys) also proceed to | |
3981 | * scrub all objects. | |
3982 | */ | |
3983 | if (error == 0 && zep->zb_birth == latest_txg) { | |
3984 | /* Block neither free nor re written. */ | |
3985 | zbookmark_phys_t zb; | |
3986 | zep_to_zb(fs, zep, &zb); | |
3987 | scn->scn_zio_root = zio_root(spa, NULL, NULL, | |
3988 | ZIO_FLAG_CANFAIL); | |
3989 | /* We have already acquired the config lock for spa */ | |
3990 | read_by_block_level(scn, zb); | |
3991 | ||
3992 | (void) zio_wait(scn->scn_zio_root); | |
3993 | scn->scn_zio_root = NULL; | |
3994 | ||
3995 | scn->errorscrub_phys.dep_examined++; | |
3996 | scn->errorscrub_phys.dep_to_examine--; | |
3997 | (*count)++; | |
3998 | if ((*count) == zfs_scrub_error_blocks_per_txg || | |
3999 | dsl_error_scrub_check_suspend(scn, &zb)) { | |
4000 | dsl_dataset_rele(ds, FTAG); | |
4001 | return (SET_ERROR(EFAULT)); | |
4002 | } | |
4003 | ||
4004 | check_snapshot = B_FALSE; | |
4005 | } else if (error == 0) { | |
4006 | txg_to_consider = latest_txg; | |
4007 | } | |
4008 | ||
4009 | /* | |
4010 | * Retrieve the number of snapshots if the dataset is not a snapshot. | |
4011 | */ | |
4012 | uint64_t snap_count = 0; | |
4013 | if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) { | |
4014 | ||
4015 | error = zap_count(spa->spa_meta_objset, | |
4016 | dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count); | |
4017 | ||
4018 | if (error != 0) { | |
4019 | dsl_dataset_rele(ds, FTAG); | |
4020 | return (error); | |
4021 | } | |
4022 | } | |
4023 | ||
4024 | if (snap_count == 0) { | |
4025 | /* Filesystem without snapshots. */ | |
4026 | dsl_dataset_rele(ds, FTAG); | |
4027 | return (0); | |
4028 | } | |
4029 | ||
4030 | uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; | |
4031 | uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; | |
4032 | ||
4033 | dsl_dataset_rele(ds, FTAG); | |
4034 | ||
4035 | /* Check only snapshots created from this file system. */ | |
4036 | while (snap_obj != 0 && zep->zb_birth < snap_obj_txg && | |
4037 | snap_obj_txg <= txg_to_consider) { | |
4038 | ||
4039 | error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds); | |
4040 | if (error != 0) | |
4041 | return (error); | |
4042 | ||
4043 | if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) { | |
4044 | snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; | |
4045 | snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; | |
4046 | dsl_dataset_rele(ds, FTAG); | |
4047 | continue; | |
4048 | } | |
4049 | ||
4050 | boolean_t affected = B_TRUE; | |
4051 | if (check_snapshot) { | |
4052 | uint64_t blk_txg; | |
4053 | error = find_birth_txg(ds, zep, &blk_txg); | |
4054 | ||
4055 | /* | |
4056 | * Scrub the snapshot also when zb_birth == 0 or when | |
4057 | * find_birth_txg() returns an error. | |
4058 | */ | |
4059 | affected = (error == 0 && zep->zb_birth == blk_txg) || | |
4060 | (error != 0) || (zep->zb_birth == 0); | |
4061 | } | |
4062 | ||
4063 | /* Scrub snapshots. */ | |
4064 | if (affected) { | |
4065 | zbookmark_phys_t zb; | |
4066 | zep_to_zb(snap_obj, zep, &zb); | |
4067 | scn->scn_zio_root = zio_root(spa, NULL, NULL, | |
4068 | ZIO_FLAG_CANFAIL); | |
4069 | /* We have already acquired the config lock for spa */ | |
4070 | read_by_block_level(scn, zb); | |
4071 | ||
4072 | (void) zio_wait(scn->scn_zio_root); | |
4073 | scn->scn_zio_root = NULL; | |
4074 | ||
4075 | scn->errorscrub_phys.dep_examined++; | |
4076 | scn->errorscrub_phys.dep_to_examine--; | |
4077 | (*count)++; | |
4078 | if ((*count) == zfs_scrub_error_blocks_per_txg || | |
4079 | dsl_error_scrub_check_suspend(scn, &zb)) { | |
4080 | dsl_dataset_rele(ds, FTAG); | |
4081 | return (SET_ERROR(EFAULT)); | |
4082 | } | |
4083 | } | |
4084 | snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; | |
4085 | snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; | |
4086 | dsl_dataset_rele(ds, FTAG); | |
4087 | } | |
4088 | return (0); | |
4089 | } | |
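
/*
 * Illustrative caller sketch (a reading aid, not a verbatim copy of
 * dsl_errorscrub_sync() below): the per-txg budget of
 * zfs_scrub_error_blocks_per_txg is shared across all datasets through
 * "count", so EFAULT from scrub_filesystem() means "budget exhausted or
 * suspend requested", not a hard failure:
 *
 *	int count = 0;
 *	error = scrub_filesystem(spa, top_affected_fs, &zep, &count);
 *	if (error == SET_ERROR(EFAULT)) {
 *		stop for this txg; state is synced and the error-scrub
 *		cursor resumes on the next sync
 *	}
 */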
4090 | ||
4091 | void | |
4092 | dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx) | |
4093 | { | |
4094 | spa_t *spa = dp->dp_spa; | |
4095 | dsl_scan_t *scn = dp->dp_scan; | |
4096 | ||
4097 | /* | |
4098 | * Only process scans in sync pass 1. | |
4099 | */ | |
4100 | ||
4101 | if (spa_sync_pass(spa) > 1) | |
4102 | return; | |
4103 | ||
4104 | /* | |
4105 | * If the spa is shutting down, then stop scanning. This will | |
4106 | * ensure that the scan does not dirty any new data during the | |
4107 | * shutdown phase. | |
4108 | */ | |
4109 | if (spa_shutting_down(spa)) | |
4110 | return; | |
4111 | ||
4112 | if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) { | |
4113 | return; | |
4114 | } | |
4115 | ||
4116 | if (dsl_scan_resilvering(scn->scn_dp)) { | |
4117 | /* cancel the error scrub if a resilver has started */ | |
4118 | dsl_scan_cancel(scn->scn_dp); | |
4119 | return; | |
4120 | } | |
4121 | ||
4122 | spa->spa_scrub_active = B_TRUE; | |
4123 | scn->scn_sync_start_time = gethrtime(); | |
4124 | ||
4125 | /* | |
4126 | * zfs_scan_suspend_progress can be set to disable scrub progress. | |
4127 | * See more detailed comment in dsl_scan_sync(). | |
4128 | */ | |
4129 | if (zfs_scan_suspend_progress) { | |
4130 | uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; | |
4131 | int mintime = zfs_scrub_min_time_ms; | |
4132 | ||
4133 | while (zfs_scan_suspend_progress && | |
4134 | !txg_sync_waiting(scn->scn_dp) && | |
4135 | !spa_shutting_down(scn->scn_dp->dp_spa) && | |
4136 | NSEC2MSEC(scan_time_ns) < mintime) { | |
4137 | delay(hz); | |
4138 | scan_time_ns = gethrtime() - scn->scn_sync_start_time; | |
4139 | } | |
4140 | return; | |
4141 | } | |
4142 | ||
4143 | int i = 0; | |
4144 | zap_attribute_t *za; | |
4145 | zbookmark_phys_t *zb; | |
4146 | boolean_t limit_exceeded = B_FALSE; | |
4147 | ||
4148 | za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP); | |
4149 | zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP); | |
4150 | ||
4151 | if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { | |
4152 | for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0; | |
4153 | zap_cursor_advance(&scn->errorscrub_cursor)) { | |
4154 | name_to_bookmark(za->za_name, zb); | |
4155 | ||
4156 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, | |
4157 | NULL, ZIO_FLAG_CANFAIL); | |
4158 | dsl_pool_config_enter(dp, FTAG); | |
4159 | read_by_block_level(scn, *zb); | |
4160 | dsl_pool_config_exit(dp, FTAG); | |
4161 | ||
4162 | (void) zio_wait(scn->scn_zio_root); | |
4163 | scn->scn_zio_root = NULL; | |
4164 | ||
4165 | scn->errorscrub_phys.dep_examined += 1; | |
4166 | scn->errorscrub_phys.dep_to_examine -= 1; | |
4167 | i++; | |
4168 | if (i == zfs_scrub_error_blocks_per_txg || | |
4169 | dsl_error_scrub_check_suspend(scn, zb)) { | |
4170 | limit_exceeded = B_TRUE; | |
4171 | break; | |
4172 | } | |
4173 | } | |
4174 | ||
4175 | if (!limit_exceeded) | |
4176 | dsl_errorscrub_done(scn, B_TRUE, tx); | |
4177 | ||
4178 | dsl_errorscrub_sync_state(scn, tx); | |
4179 | kmem_free(za, sizeof (*za)); | |
4180 | kmem_free(zb, sizeof (*zb)); | |
4181 | return; | |
4182 | } | |
4183 | ||
4184 | int error = 0; | |
4185 | for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0; | |
4186 | zap_cursor_advance(&scn->errorscrub_cursor)) { | |
4187 | ||
4188 | zap_cursor_t *head_ds_cursor; | |
4189 | zap_attribute_t *head_ds_attr; | |
4190 | zbookmark_err_phys_t head_ds_block; | |
4191 | ||
4192 | head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP); | |
4193 | head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP); | |
4194 | ||
4195 | uint64_t head_ds_err_obj = za->za_first_integer; | |
4196 | uint64_t head_ds; | |
4197 | name_to_object(za->za_name, &head_ds); | |
4198 | boolean_t config_held = B_FALSE; | |
4199 | uint64_t top_affected_fs; | |
4200 | ||
4201 | for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset, | |
4202 | head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor, | |
4203 | head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) { | |
4204 | ||
4205 | name_to_errphys(head_ds_attr->za_name, &head_ds_block); | |
4206 | ||
4207 | /* | |
4208 | * If we are called from spa_sync, the pool | |
4209 | * config is already held. | |
4210 | */ | |
4211 | if (!dsl_pool_config_held(dp)) { | |
4212 | dsl_pool_config_enter(dp, FTAG); | |
4213 | config_held = B_TRUE; | |
4214 | } | |
4215 | ||
4216 | error = find_top_affected_fs(spa, | |
4217 | head_ds, &head_ds_block, &top_affected_fs); | |
4218 | if (error) | |
4219 | break; | |
4220 | ||
4221 | error = scrub_filesystem(spa, top_affected_fs, | |
4222 | &head_ds_block, &i); | |
4223 | ||
4224 | if (error == SET_ERROR(EFAULT)) { | |
4225 | limit_exceeded = B_TRUE; | |
4226 | break; | |
4227 | } | |
4228 | } | |
4229 | ||
4230 | zap_cursor_fini(head_ds_cursor); | |
4231 | kmem_free(head_ds_cursor, sizeof (*head_ds_cursor)); | |
4232 | kmem_free(head_ds_attr, sizeof (*head_ds_attr)); | |
4233 | ||
4234 | if (config_held) | |
4235 | dsl_pool_config_exit(dp, FTAG); | |
4236 | } | |
4237 | ||
4238 | kmem_free(za, sizeof (*za)); | |
4239 | kmem_free(zb, sizeof (*zb)); | |
4240 | if (!limit_exceeded) | |
4241 | dsl_errorscrub_done(scn, B_TRUE, tx); | |
4242 | ||
4243 | dsl_errorscrub_sync_state(scn, tx); | |
4244 | } | |
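
/*
 * Note on the two paths above (descriptive): without
 * SPA_FEATURE_HEAD_ERRLOG the error log is a flat ZAP of bookmarks, so
 * each cursor entry is read directly via read_by_block_level(). With
 * the feature enabled, each entry names a head dataset and a nested ZAP
 * of zbookmark_err_phys_t records, and every record is expanded to all
 * affected snapshots/clones via find_top_affected_fs() and
 * scrub_filesystem().
 */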
4245 | ||
d2734cce SD |
4246 | /* |
4247 | * This is the primary entry point for scans that is called from syncing | |
4248 | * context. Scans must happen entirely during syncing context so that we | |
e1cfd73f | 4249 | * can guarantee that blocks we are currently scanning will not change out |
d2734cce SD |
4250 | * from under us. While a scan is active, this function controls how quickly |
4251 | * transaction groups proceed, instead of the normal handling provided by | |
4252 | * txg_sync_thread(). | |
4253 | */ | |
4254 | void | |
4255 | dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) | |
4256 | { | |
4257 | int err = 0; | |
4258 | dsl_scan_t *scn = dp->dp_scan; | |
4259 | spa_t *spa = dp->dp_spa; | |
4260 | state_sync_type_t sync_type = SYNC_OPTIONAL; | |
4261 | ||
80a91e74 TC |
4262 | if (spa->spa_resilver_deferred && |
4263 | !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) | |
4264 | spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx); | |
4265 | ||
d2734cce SD |
4266 | /* |
4267 | * Check for scn_restart_txg before checking spa_load_state, so | |
4268 | * that we can restart an old-style scan while the pool is being | |
80a91e74 TC |
4269 | * imported (see dsl_scan_init). We also restart scans if there |
4270 | * is a deferred resilver and the user has manually disabled | |
4271 | * deferred resilvers via the tunable. | |
d2734cce | 4272 | */ |
80a91e74 TC |
4273 | if (dsl_scan_restarting(scn, tx) || |
4274 | (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) { | |
d2734cce SD |
4275 | pool_scan_func_t func = POOL_SCAN_SCRUB; |
4276 | dsl_scan_done(scn, B_FALSE, tx); | |
4277 | if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) | |
4278 | func = POOL_SCAN_RESILVER; | |
6f57f1e3 RE |
4279 | zfs_dbgmsg("restarting scan func=%u on %s txg=%llu", |
4280 | func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg); | |
d2734cce SD |
4281 | dsl_scan_setup_sync(&func, tx); |
4282 | } | |
4283 | ||
4284 | /* | |
4285 | * Only process scans in sync pass 1. | |
4286 | */ | |
4287 | if (spa_sync_pass(spa) > 1) | |
4288 | return; | |
4289 | ||
4290 | /* | |
4291 | * If the spa is shutting down, then stop scanning. This will | |
4292 | * ensure that the scan does not dirty any new data during the | |
4293 | * shutdown phase. | |
4294 | */ | |
4295 | if (spa_shutting_down(spa)) | |
4296 | return; | |
4297 | ||
4298 | /* | |
4299 | * If the scan is inactive due to a stalled async destroy, try again. | |
4300 | */ | |
4301 | if (!scn->scn_async_stalled && !dsl_scan_active(scn)) | |
4302 | return; | |
4303 | ||
4304 | /* reset scan statistics */ | |
4305 | scn->scn_visited_this_txg = 0; | |
4fe3a842 | 4306 | scn->scn_dedup_frees_this_txg = 0; |
d2734cce SD |
4307 | scn->scn_holes_this_txg = 0; |
4308 | scn->scn_lt_min_this_txg = 0; | |
4309 | scn->scn_gt_max_this_txg = 0; | |
4310 | scn->scn_ddt_contained_this_txg = 0; | |
4311 | scn->scn_objsets_visited_this_txg = 0; | |
4312 | scn->scn_avg_seg_size_this_txg = 0; | |
4313 | scn->scn_segs_this_txg = 0; | |
4314 | scn->scn_avg_zio_size_this_txg = 0; | |
4315 | scn->scn_zios_this_txg = 0; | |
4316 | scn->scn_suspending = B_FALSE; | |
4317 | scn->scn_sync_start_time = gethrtime(); | |
4318 | spa->spa_scrub_active = B_TRUE; | |
4319 | ||
4320 | /* | |
4321 | * First process the async destroys. If we suspend, don't do | |
4322 | * any scrubbing or resilvering. This ensures that there are no | |
4323 | * async destroys while we are scanning, so the scan code doesn't | |
4324 | * have to worry about traversing it. It is also faster to free the | |
4325 | * blocks than to scrub them. | |
4326 | */ | |
4327 | err = dsl_process_async_destroys(dp, tx); | |
4328 | if (err != 0) | |
4329 | return; | |
a1d477c2 | 4330 | |
d4a72f23 | 4331 | if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn)) |
428870ff BB |
4332 | return; |
4333 | ||
d4a72f23 TC |
4334 | /* |
4335 | * Wait a few txgs after importing to begin scanning so that | |
4336 | * we can get the pool imported quickly. | |
4337 | */ | |
4338 | if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS) | |
5d1f7fb6 | 4339 | return; |
5d1f7fb6 | 4340 | |
cef48f14 TC |
4341 | /* |
4342 | * zfs_scan_suspend_progress can be set to disable scan progress. | |
4343 | * We don't want to spin the txg_sync thread, so we add a delay | |
4344 | * here to simulate the time spent doing a scan. This is mostly | |
4345 | * useful for testing and debugging. | |
4346 | */ | |
4347 | if (zfs_scan_suspend_progress) { | |
4348 | uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; | |
fdc2d303 RY |
4349 | uint_t mintime = (scn->scn_phys.scn_func == |
4350 | POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms : | |
4351 | zfs_scrub_min_time_ms; | |
cef48f14 TC |
4352 | |
4353 | while (zfs_scan_suspend_progress && | |
4354 | !txg_sync_waiting(scn->scn_dp) && | |
4355 | !spa_shutting_down(scn->scn_dp->dp_spa) && | |
4356 | NSEC2MSEC(scan_time_ns) < mintime) { | |
4357 | delay(hz); | |
4358 | scan_time_ns = gethrtime() - scn->scn_sync_start_time; | |
4359 | } | |
4360 | return; | |
4361 | } | |
4362 | ||
c85ac731 BB |
4363 | /* |
4364 | * Disabled by default, set zfs_scan_report_txgs to report | |
4365 | * average performance over the last zfs_scan_report_txgs TXGs. | |
4366 | */ | |
4367 | if (!dsl_scan_is_paused_scrub(scn) && zfs_scan_report_txgs != 0 && | |
4368 | tx->tx_txg % zfs_scan_report_txgs == 0) { | |
4369 | scn->scn_issued_before_pass += spa->spa_scan_pass_issued; | |
4370 | spa_scan_stat_init(spa); | |
4371 | } | |
4372 | ||
d4a72f23 TC |
4373 | /* |
4374 | * It is possible to switch from unsorted to sorted at any time, | |
4375 | * but afterwards the scan will remain sorted unless reloaded from | |
4376 | * a checkpoint after a reboot. | |
4377 | */ | |
4378 | if (!zfs_scan_legacy) { | |
4379 | scn->scn_is_sorted = B_TRUE; | |
4380 | if (scn->scn_last_checkpoint == 0) | |
4381 | scn->scn_last_checkpoint = ddi_get_lbolt(); | |
4382 | } | |
0ea05c64 | 4383 | |
d4a72f23 TC |
4384 | /* |
4385 | * For sorted scans, determine what kind of work we will be doing | |
4386 | * this txg based on our memory limitations and whether or not we | |
4387 | * need to perform a checkpoint. | |
4388 | */ | |
4389 | if (scn->scn_is_sorted) { | |
4390 | /* | |
4391 | * If we are over our checkpoint interval, set scn_clearing | |
4392 | * so that we can begin checkpointing immediately. The | |
13a2ff27 | 4393 | * checkpoint allows us to save a consistent bookmark |
d4a72f23 TC |
4394 | * representing how much data we have scrubbed so far. |
4395 | * Otherwise, use the memory limit to determine if we should | |
4396 | * scan for metadata or start issuing scrub IOs. We accumulate | |
4397 | * metadata until we hit our hard memory limit at which point | |
4398 | * we issue scrub IOs until we are at our soft memory limit. | |
4399 | */ | |
4400 | if (scn->scn_checkpointing || | |
4401 | ddi_get_lbolt() - scn->scn_last_checkpoint > | |
4402 | SEC_TO_TICK(zfs_scan_checkpoint_intval)) { | |
4403 | if (!scn->scn_checkpointing) | |
6f57f1e3 RE |
4404 | zfs_dbgmsg("begin scan checkpoint for %s", |
4405 | spa->spa_name); | |
d4a72f23 TC |
4406 | |
4407 | scn->scn_checkpointing = B_TRUE; | |
4408 | scn->scn_clearing = B_TRUE; | |
4409 | } else { | |
4410 | boolean_t should_clear = dsl_scan_should_clear(scn); | |
4411 | if (should_clear && !scn->scn_clearing) { | |
6f57f1e3 RE |
4412 | zfs_dbgmsg("begin scan clearing for %s", |
4413 | spa->spa_name); | |
d4a72f23 TC |
4414 | scn->scn_clearing = B_TRUE; |
4415 | } else if (!should_clear && scn->scn_clearing) { | |
6f57f1e3 RE |
4416 | zfs_dbgmsg("finish scan clearing for %s", |
4417 | spa->spa_name); | |
d4a72f23 TC |
4418 | scn->scn_clearing = B_FALSE; |
4419 | } | |
4420 | } | |
428870ff | 4421 | } else { |
d4a72f23 TC |
4422 | ASSERT0(scn->scn_checkpointing); |
4423 | ASSERT0(scn->scn_clearing); | |
428870ff BB |
4424 | } |
4425 | ||
d4a72f23 TC |
4426 | if (!scn->scn_clearing && scn->scn_done_txg == 0) { |
4427 | /* Need to scan metadata for more blocks to scrub */ | |
4428 | dsl_scan_phys_t *scnp = &scn->scn_phys; | |
4429 | taskqid_t prefetch_tqid; | |
428870ff | 4430 | |
d4a72f23 | 4431 | /* |
c0aea7cf BB |
4432 | * Calculate the max number of in-flight bytes for pool-wide |
4433 | * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max). | |
4434 | * Limits for the issuing phase are done per top-level vdev and | |
4435 | * are handled separately. | |
d4a72f23 | 4436 | */ |
c0aea7cf BB |
4437 | scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20, |
4438 | zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa))); | |
d4a72f23 TC |
4439 | |
4440 | if (scnp->scn_ddt_bookmark.ddb_class <= | |
4441 | scnp->scn_ddt_class_max) { | |
4442 | ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); | |
6f57f1e3 | 4443 | zfs_dbgmsg("doing scan sync for %s txg %llu; " |
d4a72f23 | 4444 | "ddt bm=%llu/%llu/%llu/%llx", |
6f57f1e3 | 4445 | spa->spa_name, |
d4a72f23 TC |
4446 | (longlong_t)tx->tx_txg, |
4447 | (longlong_t)scnp->scn_ddt_bookmark.ddb_class, | |
4448 | (longlong_t)scnp->scn_ddt_bookmark.ddb_type, | |
4449 | (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, | |
4450 | (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); | |
4451 | } else { | |
6f57f1e3 | 4452 | zfs_dbgmsg("doing scan sync for %s txg %llu; " |
d4a72f23 | 4453 | "bm=%llu/%llu/%llu/%llu", |
6f57f1e3 | 4454 | spa->spa_name, |
d4a72f23 TC |
4455 | (longlong_t)tx->tx_txg, |
4456 | (longlong_t)scnp->scn_bookmark.zb_objset, | |
4457 | (longlong_t)scnp->scn_bookmark.zb_object, | |
4458 | (longlong_t)scnp->scn_bookmark.zb_level, | |
4459 | (longlong_t)scnp->scn_bookmark.zb_blkid); | |
4460 | } | |
428870ff | 4461 | |
d4a72f23 TC |
4462 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, |
4463 | NULL, ZIO_FLAG_CANFAIL); | |
428870ff | 4464 | |
d4a72f23 TC |
4465 | scn->scn_prefetch_stop = B_FALSE; |
4466 | prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, | |
4467 | dsl_scan_prefetch_thread, scn, TQ_SLEEP); | |
4468 | ASSERT(prefetch_tqid != TASKQID_INVALID); | |
428870ff | 4469 | |
d4a72f23 TC |
4470 | dsl_pool_config_enter(dp, FTAG); |
4471 | dsl_scan_visit(scn, tx); | |
4472 | dsl_pool_config_exit(dp, FTAG); | |
428870ff | 4473 | |
d4a72f23 TC |
4474 | mutex_enter(&dp->dp_spa->spa_scrub_lock); |
4475 | scn->scn_prefetch_stop = B_TRUE; | |
4476 | cv_broadcast(&spa->spa_scrub_io_cv); | |
4477 | mutex_exit(&dp->dp_spa->spa_scrub_lock); | |
428870ff | 4478 | |
d4a72f23 TC |
4479 | taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); |
4480 | (void) zio_wait(scn->scn_zio_root); | |
4481 | scn->scn_zio_root = NULL; | |
4482 | ||
6f57f1e3 | 4483 | zfs_dbgmsg("scan visited %llu blocks of %s in %llums " |
d4a72f23 TC |
4484 | "(%llu os's, %llu holes, %llu < mintxg, " |
4485 | "%llu in ddt, %llu > maxtxg)", | |
4486 | (longlong_t)scn->scn_visited_this_txg, | |
6f57f1e3 | 4487 | spa->spa_name, |
d4a72f23 TC |
4488 | (longlong_t)NSEC2MSEC(gethrtime() - |
4489 | scn->scn_sync_start_time), | |
4490 | (longlong_t)scn->scn_objsets_visited_this_txg, | |
4491 | (longlong_t)scn->scn_holes_this_txg, | |
4492 | (longlong_t)scn->scn_lt_min_this_txg, | |
4493 | (longlong_t)scn->scn_ddt_contained_this_txg, | |
4494 | (longlong_t)scn->scn_gt_max_this_txg); | |
4495 | ||
4496 | if (!scn->scn_suspending) { | |
4497 | ASSERT0(avl_numnodes(&scn->scn_queue)); | |
4498 | scn->scn_done_txg = tx->tx_txg + 1; | |
4499 | if (scn->scn_is_sorted) { | |
4500 | scn->scn_checkpointing = B_TRUE; | |
4501 | scn->scn_clearing = B_TRUE; | |
c85ac731 BB |
4502 | scn->scn_issued_before_pass += |
4503 | spa->spa_scan_pass_issued; | |
4504 | spa_scan_stat_init(spa); | |
d4a72f23 | 4505 | } |
6f57f1e3 RE |
4506 | zfs_dbgmsg("scan complete for %s txg %llu", |
4507 | spa->spa_name, | |
d4a72f23 TC |
4508 | (longlong_t)tx->tx_txg); |
4509 | } | |
1c0c729a | 4510 | } else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) { |
5e0bd0ae TC |
4511 | ASSERT(scn->scn_clearing); |
4512 | ||
d4a72f23 TC |
4513 | /* need to issue scrubbing IOs from per-vdev queues */ |
4514 | scn->scn_zio_root = zio_root(dp->dp_spa, NULL, | |
4515 | NULL, ZIO_FLAG_CANFAIL); | |
4516 | scan_io_queues_run(scn); | |
4517 | (void) zio_wait(scn->scn_zio_root); | |
4518 | scn->scn_zio_root = NULL; | |
4519 | ||
4520 | /* calculate and dprintf the current memory usage */ | |
4521 | (void) dsl_scan_should_clear(scn); | |
4522 | dsl_scan_update_stats(scn); | |
4523 | ||
6f57f1e3 RE |
4524 | zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) " |
4525 | "in %llums (avg_block_size = %llu, avg_seg_size = %llu)", | |
d4a72f23 | 4526 | (longlong_t)scn->scn_zios_this_txg, |
6f57f1e3 | 4527 | spa->spa_name, |
d4a72f23 TC |
4528 | (longlong_t)scn->scn_segs_this_txg, |
4529 | (longlong_t)NSEC2MSEC(gethrtime() - | |
4530 | scn->scn_sync_start_time), | |
4531 | (longlong_t)scn->scn_avg_zio_size_this_txg, | |
4532 | (longlong_t)scn->scn_avg_seg_size_this_txg); | |
4533 | } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) { | |
4534 | /* Finished with everything. Mark the scrub as complete */ | |
6f57f1e3 RE |
4535 | zfs_dbgmsg("scan issuing complete txg %llu for %s", |
4536 | (longlong_t)tx->tx_txg, | |
4537 | spa->spa_name); | |
d4a72f23 TC |
4538 | ASSERT3U(scn->scn_done_txg, !=, 0); |
4539 | ASSERT0(spa->spa_scrub_inflight); | |
1c0c729a | 4540 | ASSERT0(scn->scn_queues_pending); |
d4a72f23 TC |
4541 | dsl_scan_done(scn, B_TRUE, tx); |
4542 | sync_type = SYNC_MANDATORY; | |
428870ff | 4543 | } |
428870ff | 4544 | |
d4a72f23 | 4545 | dsl_scan_sync_state(scn, tx, sync_type); |
428870ff BB |
4546 | } |
4547 | ||
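/*
 * Condensed per-txg flow of dsl_scan_sync() above (a reading aid; the
 * code is authoritative):
 *
 *	!scn_clearing && scn_done_txg == 0   gather: traverse metadata and
 *	                                     fill the per-vdev queues
 *	scn_is_sorted && queues pending      issue: drain the per-vdev
 *	                                     queues in LBA order
 *	scn_done_txg != 0 && <= this txg     finish: dsl_scan_done() and a
 *	                                     mandatory state sync
 */
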
428870ff | 4548 | static void |
82732299 | 4549 | count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all) |
428870ff | 4550 | { |
3b61ca3e TC |
4551 | /* |
4552 | * Don't count embedded bp's, since we already did the work of | |
4553 | * scanning these when we scanned the containing block. | |
4554 | */ | |
4555 | if (BP_IS_EMBEDDED(bp)) | |
4556 | return; | |
4557 | ||
ab7615d9 TC |
4558 | /* |
4559 | * Update the spa's stats on how many bytes we have issued. | |
4560 | * Sequential scrubs create a zio for each DVA of the bp. Each | |
4561 | * of these will include all DVAs for repair purposes, but the | |
4562 | * zio code will only try the first one unless there is an issue. | |
4563 | * Therefore, we should only count the first DVA for these IOs. | |
4564 | */ | |
82732299 AM |
4565 | atomic_add_64(&spa->spa_scan_pass_issued, |
4566 | all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0])); | |
4567 | } | |
d4a72f23 | 4568 | |
82732299 AM |
4569 | static void |
4570 | count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp) | |
4571 | { | |
428870ff BB |
4572 | /* |
4573 | * If we resume after a reboot, zab will be NULL; don't record | |
4574 | * incomplete stats in that case. | |
4575 | */ | |
4576 | if (zab == NULL) | |
4577 | return; | |
4578 | ||
82732299 | 4579 | for (int i = 0; i < 4; i++) { |
428870ff BB |
4580 | int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; |
4581 | int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; | |
9ae529ec CS |
4582 | |
4583 | if (t & DMU_OT_NEWTYPE) | |
4584 | t = DMU_OT_OTHER; | |
1c27024e DB |
4585 | zfs_blkstat_t *zb = &zab->zab_type[l][t]; |
4586 | int equal; | |
428870ff BB |
4587 | |
4588 | zb->zb_count++; | |
4589 | zb->zb_asize += BP_GET_ASIZE(bp); | |
4590 | zb->zb_lsize += BP_GET_LSIZE(bp); | |
4591 | zb->zb_psize += BP_GET_PSIZE(bp); | |
4592 | zb->zb_gangs += BP_COUNT_GANG(bp); | |
4593 | ||
4594 | switch (BP_GET_NDVAS(bp)) { | |
4595 | case 2: | |
4596 | if (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
4597 | DVA_GET_VDEV(&bp->blk_dva[1])) | |
4598 | zb->zb_ditto_2_of_2_samevdev++; | |
4599 | break; | |
4600 | case 3: | |
4601 | equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
4602 | DVA_GET_VDEV(&bp->blk_dva[1])) + | |
4603 | (DVA_GET_VDEV(&bp->blk_dva[0]) == | |
4604 | DVA_GET_VDEV(&bp->blk_dva[2])) + | |
4605 | (DVA_GET_VDEV(&bp->blk_dva[1]) == | |
4606 | DVA_GET_VDEV(&bp->blk_dva[2])); | |
4607 | if (equal == 1) | |
4608 | zb->zb_ditto_2_of_3_samevdev++; | |
4609 | else if (equal == 3) | |
4610 | zb->zb_ditto_3_of_3_samevdev++; | |
4611 | break; | |
4612 | } | |
4613 | } | |
4614 | } | |
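
/*
 * Note on the ditto accounting in count_block() above: for a 3-DVA
 * block the three pairwise vdev comparisons can only sum to 0, 1 or 3.
 * If two comparisons were true the third would be as well (equality is
 * transitive), so equal == 2 is impossible and only the 1 ("2 of 3 on
 * the same vdev") and 3 ("3 of 3") cases are counted.
 */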
4615 | ||
4616 | static void | |
d4a72f23 | 4617 | scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) |
428870ff | 4618 | { |
d4a72f23 | 4619 | avl_index_t idx; |
d4a72f23 | 4620 | dsl_scan_t *scn = queue->q_scn; |
428870ff | 4621 | |
d4a72f23 | 4622 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); |
428870ff | 4623 | |
1c0c729a AM |
4624 | if (unlikely(avl_is_empty(&queue->q_sios_by_addr))) |
4625 | atomic_add_64(&scn->scn_queues_pending, 1); | |
d4a72f23 TC |
4626 | if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { |
4627 | /* block is already scheduled for reading */ | |
ab7615d9 | 4628 | sio_free(sio); |
d4a72f23 | 4629 | return; |
428870ff | 4630 | } |
d4a72f23 | 4631 | avl_insert(&queue->q_sios_by_addr, sio, idx); |
ab7615d9 | 4632 | queue->q_sio_memused += SIO_GET_MUSED(sio); |
1c0c729a AM |
4633 | range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), |
4634 | SIO_GET_ASIZE(sio)); | |
428870ff BB |
4635 | } |
4636 | ||
d4a72f23 TC |
4637 | /* |
4638 | * Given all the info we got from our metadata scanning process, we | |
4639 | * construct a scan_io_t and insert it into the scan sorting queue. The | |
4640 | * I/O must already be suitable for us to process. This is controlled | |
4641 | * by dsl_scan_enqueue(). | |
4642 | */ | |
4643 | static void | |
4644 | scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, | |
4645 | int zio_flags, const zbookmark_phys_t *zb) | |
3d6da72d | 4646 | { |
ab7615d9 | 4647 | scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp)); |
3d6da72d | 4648 | |
d4a72f23 TC |
4649 | ASSERT0(BP_IS_GANG(bp)); |
4650 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
3d6da72d | 4651 | |
d4a72f23 TC |
4652 | bp2sio(bp, sio, dva_i); |
4653 | sio->sio_flags = zio_flags; | |
4654 | sio->sio_zb = *zb; | |
3d6da72d | 4655 | |
1c0c729a | 4656 | queue->q_last_ext_addr = -1; |
d4a72f23 TC |
4657 | scan_io_queue_insert_impl(queue, sio); |
4658 | } | |
4659 | ||
4660 | /* | |
4661 | * Given a set of I/O parameters as discovered by the metadata traversal | |
4662 | * process, attempts to place the I/O into the sorted queues (if allowed), | |
4663 | * or immediately executes the I/O. | |
4664 | */ | |
4665 | static void | |
4666 | dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, | |
4667 | const zbookmark_phys_t *zb) | |
4668 | { | |
4669 | spa_t *spa = dp->dp_spa; | |
4670 | ||
4671 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
3d6da72d IH |
4672 | |
4673 | /* | |
d4a72f23 TC |
4674 | * Gang blocks are hard to issue sequentially, so we just issue them |
4675 | * here immediately instead of queuing them. | |
3d6da72d | 4676 | */ |
d4a72f23 TC |
4677 | if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { |
4678 | scan_exec_io(dp, bp, zio_flags, zb, NULL); | |
4679 | return; | |
4680 | } | |
3d6da72d | 4681 | |
d4a72f23 TC |
4682 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
4683 | dva_t dva; | |
4684 | vdev_t *vdev; | |
4685 | ||
4686 | dva = bp->blk_dva[i]; | |
4687 | vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); | |
4688 | ASSERT(vdev != NULL); | |
4689 | ||
4690 | mutex_enter(&vdev->vdev_scan_io_queue_lock); | |
4691 | if (vdev->vdev_scan_io_queue == NULL) | |
4692 | vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); | |
4693 | ASSERT(dp->dp_scan != NULL); | |
4694 | scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, | |
4695 | i, zio_flags, zb); | |
4696 | mutex_exit(&vdev->vdev_scan_io_queue_lock); | |
4697 | } | |
3d6da72d IH |
4698 | } |
4699 | ||
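/*
 * Fan-out sketch for dsl_scan_enqueue() above: a block with N DVAs
 * yields N scan_io_t entries, one in the queue of each top-level vdev
 * holding a copy. For example, a copies=2 block with DVAs on top-level
 * vdevs 0 and 3 is inserted into both vdevs' vdev_scan_io_queue; each
 * entry is allocated with sio_alloc(BP_GET_NDVAS(bp)) so the full bp,
 * and therefore every copy, is available for repair on read failure.
 */
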
428870ff BB |
4700 | static int |
4701 | dsl_scan_scrub_cb(dsl_pool_t *dp, | |
5dbd68a3 | 4702 | const blkptr_t *bp, const zbookmark_phys_t *zb) |
428870ff BB |
4703 | { |
4704 | dsl_scan_t *scn = dp->dp_scan; | |
428870ff BB |
4705 | spa_t *spa = dp->dp_spa; |
4706 | uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); | |
d4a72f23 | 4707 | size_t psize = BP_GET_PSIZE(bp); |
d6320ddb | 4708 | boolean_t needs_io = B_FALSE; |
572e2857 | 4709 | int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; |
428870ff | 4710 | |
82732299 | 4711 | count_block(dp->dp_blkstats, bp); |
428870ff | 4712 | if (phys_birth <= scn->scn_phys.scn_min_txg || |
863522b1 | 4713 | phys_birth >= scn->scn_phys.scn_max_txg) { |
82732299 | 4714 | count_block_issued(spa, bp, B_TRUE); |
428870ff | 4715 | return (0); |
863522b1 | 4716 | } |
428870ff | 4717 | |
00c405b4 MA |
4718 | /* Embedded BP's have phys_birth==0, so we reject them above. */ |
4719 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
9b67f605 | 4720 | |
428870ff BB |
4721 | ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); |
4722 | if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { | |
4723 | zio_flags |= ZIO_FLAG_SCRUB; | |
428870ff | 4724 | needs_io = B_TRUE; |
a117a6d6 GW |
4725 | } else { |
4726 | ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); | |
428870ff | 4727 | zio_flags |= ZIO_FLAG_RESILVER; |
428870ff BB |
4728 | needs_io = B_FALSE; |
4729 | } | |
4730 | ||
4731 | /* If it's an intent log block, failure is expected. */ | |
4732 | if (zb->zb_level == ZB_ZIL_LEVEL) | |
4733 | zio_flags |= ZIO_FLAG_SPECULATIVE; | |
4734 | ||
1c27024e | 4735 | for (int d = 0; d < BP_GET_NDVAS(bp); d++) { |
3d6da72d | 4736 | const dva_t *dva = &bp->blk_dva[d]; |
428870ff BB |
4737 | |
4738 | /* | |
4739 | * Keep track of how much data we've examined so that | |
76d04993 | 4740 | * zpool(8) status can make useful progress reports. |
428870ff | 4741 | */ |
1c0c729a AM |
4742 | uint64_t asize = DVA_GET_ASIZE(dva); |
4743 | scn->scn_phys.scn_examined += asize; | |
4744 | spa->spa_scan_pass_exam += asize; | |
428870ff BB |
4745 | |
4746 | /* if it's a resilver, this may not be in the target range */ | |
3d6da72d IH |
4747 | if (!needs_io) |
4748 | needs_io = dsl_scan_need_resilver(spa, dva, psize, | |
4749 | phys_birth); | |
428870ff BB |
4750 | } |
4751 | ||
4752 | if (needs_io && !zfs_no_scrub_io) { | |
d4a72f23 TC |
4753 | dsl_scan_enqueue(dp, bp, zio_flags, zb); |
4754 | } else { | |
82732299 | 4755 | count_block_issued(spa, bp, B_TRUE); |
d4a72f23 TC |
4756 | } |
4757 | ||
4758 | /* do not relocate this block */ | |
4759 | return (0); | |
4760 | } | |
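
/*
 * Summary of the needs_io decision above (reading aid): a scrub reads
 * every block in the txg range unconditionally, while a resilver only
 * reads a block if dsl_scan_need_resilver() reports that one of its
 * DVAs falls in a missing (DTL) region; blocks needing no I/O are still
 * counted via count_block_issued() so progress reporting stays honest.
 */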
4761 | ||
4762 | static void | |
4763 | dsl_scan_scrub_done(zio_t *zio) | |
4764 | { | |
4765 | spa_t *spa = zio->io_spa; | |
4766 | blkptr_t *bp = zio->io_bp; | |
4767 | dsl_scan_io_queue_t *queue = zio->io_private; | |
4768 | ||
4769 | abd_free(zio->io_abd); | |
4770 | ||
4771 | if (queue == NULL) { | |
4772 | mutex_enter(&spa->spa_scrub_lock); | |
4773 | ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); | |
4774 | spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); | |
4775 | cv_broadcast(&spa->spa_scrub_io_cv); | |
4776 | mutex_exit(&spa->spa_scrub_lock); | |
4777 | } else { | |
4778 | mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); | |
4779 | ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); | |
4780 | queue->q_inflight_bytes -= BP_GET_PSIZE(bp); | |
4781 | cv_broadcast(&queue->q_zio_cv); | |
4782 | mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); | |
4783 | } | |
4784 | ||
4785 | if (zio->io_error && (zio->io_error != ECKSUM || | |
4786 | !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { | |
482eeef8 GA |
4787 | if (dsl_errorscrubbing(spa->spa_dsl_pool) && |
4788 | !dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) { | |
4789 | atomic_inc_64(&spa->spa_dsl_pool->dp_scan | |
4790 | ->errorscrub_phys.dep_errors); | |
4791 | } else { | |
4792 | atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys | |
4793 | .scn_errors); | |
4794 | } | |
d4a72f23 TC |
4795 | } |
4796 | } | |
428870ff | 4797 | |
d4a72f23 TC |
4798 | /* |
4799 | * Given a scanning zio's information, executes the zio. The zio need | |
4800 | * not necessarily be sortable; this function simply executes the | |
4801 | * zio, no matter what it is. The optional queue argument allows the | |
4802 | * caller to specify that they want per top level vdev IO rate limiting | |
4803 | * instead of the legacy global limiting. | |
4804 | */ | |
4805 | static void | |
4806 | scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, | |
4807 | const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue) | |
4808 | { | |
4809 | spa_t *spa = dp->dp_spa; | |
4810 | dsl_scan_t *scn = dp->dp_scan; | |
4811 | size_t size = BP_GET_PSIZE(bp); | |
4812 | abd_t *data = abd_alloc_for_io(size, B_FALSE); | |
dd867145 | 4813 | zio_t *pio; |
d4a72f23 TC |
4814 | |
4815 | if (queue == NULL) { | |
2041d6ee | 4816 | ASSERT3U(scn->scn_maxinflight_bytes, >, 0); |
428870ff | 4817 | mutex_enter(&spa->spa_scrub_lock); |
d4a72f23 | 4818 | while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes) |
428870ff | 4819 | cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); |
d4a72f23 | 4820 | spa->spa_scrub_inflight += BP_GET_PSIZE(bp); |
428870ff | 4821 | mutex_exit(&spa->spa_scrub_lock); |
dd867145 | 4822 | pio = scn->scn_zio_root; |
d4a72f23 TC |
4823 | } else { |
4824 | kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; | |
428870ff | 4825 | |
2041d6ee | 4826 | ASSERT3U(queue->q_maxinflight_bytes, >, 0); |
d4a72f23 TC |
4827 | mutex_enter(q_lock); |
4828 | while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes) | |
4829 | cv_wait(&queue->q_zio_cv, q_lock); | |
4830 | queue->q_inflight_bytes += BP_GET_PSIZE(bp); | |
dd867145 | 4831 | pio = queue->q_zio; |
d4a72f23 TC |
4832 | mutex_exit(q_lock); |
4833 | } | |
4834 | ||
dd867145 | 4835 | ASSERT(pio != NULL); |
82732299 | 4836 | count_block_issued(spa, bp, queue == NULL); |
dd867145 AM |
4837 | zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done, |
4838 | queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); | |
d4a72f23 | 4839 | } |
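
/*
 * Throttling note (descriptive of the code above plus
 * dsl_scan_scrub_done()): scan_exec_io() charges BP_GET_PSIZE(bp)
 * against either the global spa_scrub_inflight counter or the queue's
 * q_inflight_bytes before issuing, sleeping on the matching condvar
 * while the limit is exceeded; dsl_scan_scrub_done() credits the same
 * amount back and broadcasts, keeping in-flight bytes bounded by
 * scn_maxinflight_bytes (or q_maxinflight_bytes) at issue time.
 */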
572e2857 | 4840 | |
d4a72f23 TC |
4841 | /* |
4842 | * This is the primary extent sorting algorithm. We balance two parameters: | |
4843 | * 1) how many bytes of I/O are in an extent | |
4844 | * 2) how well the extent is filled with I/O (as a fraction of its total size) | |
4845 | * Since we allow extents to have gaps between their constituent I/Os, it's | |
4846 | * possible to have a fairly large extent that contains the same amount of | |
4847 | * I/O bytes as a much smaller extent, which just packs the I/O more tightly. | |
4848 | * The algorithm sorts based on a score calculated from the extent's size, | |
4849 | * the relative fill volume (in %) and a "fill weight" parameter that controls | |
4850 | * the split between whether we prefer larger extents or more fully populated | |
4851 | * extents: | |
4852 | * | |
4853 | * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) | |
4854 | * | |
4855 | * Example: | |
4856 | * 1) assume extsz = 64 MiB | |
4857 | * 2) assume fill = 32 MiB (extent is half full) | |
4858 | * 3) assume fill_weight = 3 | |
4859 | * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100 | |
4860 | * SCORE = 32M + (50 * 3 * 32M) / 100 | |
4861 | * SCORE = 32M + (4800M / 100) | |
4862 | * SCORE = 32M + 48M | |
4863 | * ^ ^ | |
4864 | * | +--- final total relative fill-based score | |
4865 | * +--------- final total fill-based score | |
4866 | * SCORE = 80M | |
4867 | * | |
4868 | * As can be seen, at fill_weight=3, the algorithm is slightly biased towards | |
4869 | * extents that are more completely filled (in a 3:2 ratio) vs just larger. | |
4870 | * Note that as an optimization, we replace multiplication and division by | |
e1cfd73f | 4871 | * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). |
1c0c729a AM |
4872 | * |
4873 | * Since we do not care if one extent is only a few percent better than | |
4874 | * another, we compress the score into 6 bits via binary logarithm (AKA | |
4875 | * highbit64()) and store it in the high, ashift-unused bits of the | |
4876 | * offset. This shrinks q_exts_by_size B-tree elements to 64 bits that | |
4877 | * compare in a single operation, makes scrubs more sequential, and | |
4878 | * lowers the odds that a minor extent change moves it within the B-tree. | |
d4a72f23 TC |
4879 | */ |
4880 | static int | |
4881 | ext_size_compare(const void *x, const void *y) | |
4882 | { | |
1c0c729a AM |
4883 | const uint64_t *a = x, *b = y; |
4884 | ||
4885 | return (TREE_CMP(*a, *b)); | |
4886 | } | |
4887 | ||
4888 | static void | |
4889 | ext_size_create(range_tree_t *rt, void *arg) | |
4890 | { | |
4891 | (void) rt; | |
4892 | zfs_btree_t *size_tree = arg; | |
ca577779 | 4893 | |
1c0c729a AM |
4894 | zfs_btree_create(size_tree, ext_size_compare, sizeof (uint64_t)); |
4895 | } | |
d4a72f23 | 4896 | |
1c0c729a AM |
4897 | static void |
4898 | ext_size_destroy(range_tree_t *rt, void *arg) | |
4899 | { | |
4900 | (void) rt; | |
4901 | zfs_btree_t *size_tree = arg; | |
4902 | ASSERT0(zfs_btree_numnodes(size_tree)); | |
d4a72f23 | 4903 | |
1c0c729a AM |
4904 | zfs_btree_destroy(size_tree); |
4905 | } | |
4906 | ||
4907 | static uint64_t | |
4908 | ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg) | |
4909 | { | |
4910 | (void) rt; | |
4911 | uint64_t size = rsg->rs_end - rsg->rs_start; | |
4912 | uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) * | |
4913 | fill_weight * rsg->rs_fill) >> 7); | |
4914 | ASSERT3U(rt->rt_shift, >=, 8); | |
4915 | return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start); | |
d4a72f23 | 4916 | } |
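
/*
 * Worked example for ext_size_value() (assuming highbit64() returns the
 * 1-based index of the highest set bit): the 80 MiB score from the
 * example above has highbit64(80M) == 27, so the key's top byte is
 * 64 - 27 == 37. Doubling the score to 160 MiB gives a top byte of
 * 64 - 28 == 36, which compares lower under TREE_CMP(), so
 * better-scoring extents sort toward the head of q_exts_by_size while
 * the low offset bits keep equal-bucket extents in LBA order.
 */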
428870ff | 4917 | |
1c0c729a AM |
4918 | static void |
4919 | ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg) | |
4920 | { | |
4921 | zfs_btree_t *size_tree = arg; | |
4922 | ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); | |
4923 | uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); | |
4924 | zfs_btree_add(size_tree, &v); | |
4925 | } | |
4926 | ||
4927 | static void | |
4928 | ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg) | |
4929 | { | |
4930 | zfs_btree_t *size_tree = arg; | |
4931 | ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); | |
4932 | uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); | |
4933 | zfs_btree_remove(size_tree, &v); | |
4934 | } | |
4935 | ||
4936 | static void | |
4937 | ext_size_vacate(range_tree_t *rt, void *arg) | |
4938 | { | |
4939 | zfs_btree_t *size_tree = arg; | |
4940 | zfs_btree_clear(size_tree); | |
4941 | zfs_btree_destroy(size_tree); | |
4942 | ||
4943 | ext_size_create(rt, arg); | |
4944 | } | |
4945 | ||
4946 | static const range_tree_ops_t ext_size_ops = { | |
4947 | .rtop_create = ext_size_create, | |
4948 | .rtop_destroy = ext_size_destroy, | |
4949 | .rtop_add = ext_size_add, | |
4950 | .rtop_remove = ext_size_remove, | |
4951 | .rtop_vacate = ext_size_vacate | |
4952 | }; | |
4953 | ||
d4a72f23 TC |
4954 | /* |
4955 | * Comparator for the q_sios_by_addr tree. Sorting is simply performed | |
4956 | * based on LBA-order (from lowest to highest). | |
4957 | */ | |
4958 | static int | |
4959 | sio_addr_compare(const void *x, const void *y) | |
4960 | { | |
4961 | const scan_io_t *a = x, *b = y; | |
4962 | ||
ca577779 | 4963 | return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b))); |
d4a72f23 TC |
4964 | } |
4965 | ||
4966 | /* IO queues are created on demand when they are needed. */ | |
4967 | static dsl_scan_io_queue_t * | |
4968 | scan_io_queue_create(vdev_t *vd) | |
4969 | { | |
4970 | dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; | |
4971 | dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP); | |
4972 | ||
4973 | q->q_scn = scn; | |
4974 | q->q_vd = vd; | |
ab7615d9 | 4975 | q->q_sio_memused = 0; |
1c0c729a | 4976 | q->q_last_ext_addr = -1; |
d4a72f23 | 4977 | cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); |
1c0c729a AM |
4978 | q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP, |
4979 | &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap); | |
d4a72f23 TC |
4980 | avl_create(&q->q_sios_by_addr, sio_addr_compare, |
4981 | sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); | |
4982 | ||
4983 | return (q); | |
428870ff BB |
4984 | } |
4985 | ||
0ea05c64 | 4986 | /* |
d4a72f23 TC |
4987 | * Destroys a scan queue and all segments and scan_io_t's contained in it. |
4988 | * No further execution of I/O occurs, anything pending in the queue is | |
4989 | * simply freed without being executed. | |
0ea05c64 | 4990 | */ |
d4a72f23 TC |
4991 | void |
4992 | dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue) | |
428870ff | 4993 | { |
d4a72f23 TC |
4994 | dsl_scan_t *scn = queue->q_scn; |
4995 | scan_io_t *sio; | |
4996 | void *cookie = NULL; | |
d4a72f23 TC |
4997 | |
4998 | ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); | |
4999 | ||
1c0c729a AM |
5000 | if (!avl_is_empty(&queue->q_sios_by_addr)) |
5001 | atomic_add_64(&scn->scn_queues_pending, -1); | |
d4a72f23 TC |
5002 | while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != |
5003 | NULL) { | |
5004 | ASSERT(range_tree_contains(queue->q_exts_by_addr, | |
ab7615d9 | 5005 | SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio))); |
ab7615d9 TC |
5006 | queue->q_sio_memused -= SIO_GET_MUSED(sio); |
5007 | sio_free(sio); | |
d4a72f23 | 5008 | } |
428870ff | 5009 | |
ab7615d9 | 5010 | ASSERT0(queue->q_sio_memused); |
d4a72f23 TC |
5011 | range_tree_vacate(queue->q_exts_by_addr, NULL, queue); |
5012 | range_tree_destroy(queue->q_exts_by_addr); | |
5013 | avl_destroy(&queue->q_sios_by_addr); | |
5014 | cv_destroy(&queue->q_zio_cv); | |
428870ff | 5015 | |
d4a72f23 TC |
5016 | kmem_free(queue, sizeof (*queue)); |
5017 | } | |
0ea05c64 | 5018 | |
d4a72f23 TC |
5019 | /* |
5020 | * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is | |
5021 | * called on behalf of vdev_top_transfer when creating or destroying | |
5022 | * a mirror vdev due to zpool attach/detach. | |
5023 | */ | |
5024 | void | |
5025 | dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd) | |
5026 | { | |
5027 | mutex_enter(&svd->vdev_scan_io_queue_lock); | |
5028 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
5029 | ||
5030 | VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL); | |
5031 | tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue; | |
5032 | svd->vdev_scan_io_queue = NULL; | |
a1d477c2 | 5033 | if (tvd->vdev_scan_io_queue != NULL) |
d4a72f23 | 5034 | tvd->vdev_scan_io_queue->q_vd = tvd; |
0ea05c64 | 5035 | |
d4a72f23 TC |
5036 | mutex_exit(&tvd->vdev_scan_io_queue_lock); |
5037 | mutex_exit(&svd->vdev_scan_io_queue_lock); | |
428870ff | 5038 | } |
c409e464 | 5039 | |
d4a72f23 TC |
5040 | static void |
5041 | scan_io_queues_destroy(dsl_scan_t *scn) | |
784d15c1 | 5042 | { |
d4a72f23 TC |
5043 | vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; |
5044 | ||
5045 | for (uint64_t i = 0; i < rvd->vdev_children; i++) { | |
5046 | vdev_t *tvd = rvd->vdev_child[i]; | |
5047 | ||
5048 | mutex_enter(&tvd->vdev_scan_io_queue_lock); | |
5049 | if (tvd->vdev_scan_io_queue != NULL) | |
5050 | dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue); | |
5051 | tvd->vdev_scan_io_queue = NULL; | |
5052 | mutex_exit(&tvd->vdev_scan_io_queue_lock); | |
5053 | } | |
784d15c1 NR |
5054 | } |
5055 | ||
d4a72f23 TC |
5056 | static void |
5057 | dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) | |
5058 | { | |
5059 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
5060 | dsl_scan_t *scn = dp->dp_scan; | |
5061 | vdev_t *vdev; | |
5062 | kmutex_t *q_lock; | |
5063 | dsl_scan_io_queue_t *queue; | |
ab7615d9 | 5064 | scan_io_t *srch_sio, *sio; |
d4a72f23 TC |
5065 | avl_index_t idx; |
5066 | uint64_t start, size; | |
5067 | ||
5068 | vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i])); | |
5069 | ASSERT(vdev != NULL); | |
5070 | q_lock = &vdev->vdev_scan_io_queue_lock; | |
5071 | queue = vdev->vdev_scan_io_queue; | |
5072 | ||
5073 | mutex_enter(q_lock); | |
5074 | if (queue == NULL) { | |
5075 | mutex_exit(q_lock); | |
5076 | return; | |
5077 | } | |
5078 | ||
ab7615d9 TC |
5079 | srch_sio = sio_alloc(BP_GET_NDVAS(bp)); |
5080 | bp2sio(bp, srch_sio, dva_i); | |
5081 | start = SIO_GET_OFFSET(srch_sio); | |
5082 | size = SIO_GET_ASIZE(srch_sio); | |
d4a72f23 TC |
5083 | |
5084 | /* | |
5085 | * We can find the zio in two states: | |
5086 | * 1) Cold, just sitting in the queue of zio's to be issued at | |
5087 | * some point in the future. In this case, all we do is | |
5088 | * remove the zio from the q_sios_by_addr tree, decrement | |
5089 | * its data volume from the containing range_seg_t and | |
5090 | * resort the q_exts_by_size tree to reflect that the | |
5091 | * range_seg_t has lost some of its 'fill'. We don't shorten | |
5092 | * the range_seg_t - this is usually rare enough not to be | |
5093 | * worth the extra hassle of trying keep track of precise | |
5094 | * extent boundaries. | |
5095 | * 2) Hot, where the zio is currently in-flight in | |
5096 | * dsl_scan_issue_ios. In this case, we can't simply | |
5097 | * reach in and stop the in-flight zio's, so we instead | |
5098 | * block the caller. Eventually, dsl_scan_issue_ios will | |
5099 | * be done with issuing the zio's it gathered and will | |
5100 | * signal us. | |
5101 | */ | |
ab7615d9 TC |
5102 | sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); |
5103 | sio_free(srch_sio); | |
5104 | ||
d4a72f23 | 5105 | if (sio != NULL) { |
d4a72f23 TC |
5106 | blkptr_t tmpbp; |
5107 | ||
5108 | /* Got it while it was cold in the queue */ | |
ab7615d9 | 5109 | ASSERT3U(start, ==, SIO_GET_OFFSET(sio)); |
1c0c729a | 5110 | ASSERT3U(size, ==, SIO_GET_ASIZE(sio)); |
d4a72f23 | 5111 | avl_remove(&queue->q_sios_by_addr, sio); |
1c0c729a AM |
5112 | if (avl_is_empty(&queue->q_sios_by_addr)) |
5113 | atomic_add_64(&scn->scn_queues_pending, -1); | |
ab7615d9 | 5114 | queue->q_sio_memused -= SIO_GET_MUSED(sio); |
c409e464 | 5115 | |
d4a72f23 TC |
5116 | ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); |
5117 | range_tree_remove_fill(queue->q_exts_by_addr, start, size); | |
5118 | ||
d4a72f23 | 5119 | /* count the block as though we issued it */ |
ab7615d9 | 5120 | sio2bp(sio, &tmpbp); |
82732299 | 5121 | count_block_issued(spa, &tmpbp, B_FALSE); |
c409e464 | 5122 | |
ab7615d9 | 5123 | sio_free(sio); |
d4a72f23 TC |
5124 | } |
5125 | mutex_exit(q_lock); | |
5126 | } | |
c409e464 | 5127 | |
d4a72f23 TC |
5128 | /* |
5129 | * Callback invoked when a zio_free() zio is executing. This needs to be | |
5130 | * intercepted to prevent the zio from deallocating a particular portion | |
5131 | * of disk space and it then getting reallocated and written to, while we | |
5132 | * still have it queued up for processing. | |
5133 | */ | |
5134 | void | |
5135 | dsl_scan_freed(spa_t *spa, const blkptr_t *bp) | |
5136 | { | |
5137 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
5138 | dsl_scan_t *scn = dp->dp_scan; | |
5139 | ||
5140 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
5141 | ASSERT(scn != NULL); | |
5142 | if (!dsl_scan_is_running(scn)) | |
5143 | return; | |
5144 | ||
5145 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) | |
5146 | dsl_scan_freed_dva(spa, bp, i); | |
5147 | } | |
5148 | ||
3c819a2c JP |
5149 | /* |
5150 | * Check if a vdev needs resilvering (non-empty DTL); if so and a resilver | |
5151 | * has not started, start it. Otherwise, only restart if max txg in DTL range is | |
5152 | * greater than the max txg in the current scan. If the DTL max is less than | |
5153 | * the scan max, then the vdev has not missed any new data since the resilver | |
5154 | * started, so a restart is not needed. | |
5155 | */ | |
5156 | void | |
5157 | dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd) | |
5158 | { | |
5159 | uint64_t min, max; | |
5160 | ||
5161 | if (!vdev_resilver_needed(vd, &min, &max)) | |
5162 | return; | |
5163 | ||
5164 | if (!dsl_scan_resilvering(dp)) { | |
5165 | spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); | |
5166 | return; | |
5167 | } | |
5168 | ||
5169 | if (max <= dp->dp_scan->scn_phys.scn_max_txg) | |
5170 | return; | |
5171 | ||
5172 | /* restart is needed, check if it can be deferred */ | |
5173 | if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) | |
5174 | vdev_defer_resilver(vd); | |
5175 | else | |
5176 | spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); | |
5177 | } | |
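
/*
 * Decision table for dsl_scan_assess_vdev() above (reading aid):
 *
 *	DTL empty                           nothing to do
 *	DTL non-empty, no resilver active   request SPA_ASYNC_RESILVER
 *	DTL max txg <= current scan max     already covered; no restart
 *	otherwise                           defer if RESILVER_DEFER is
 *	                                    enabled, else restart now
 */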
5178 | ||
ab8d9c17 | 5179 | ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW, |
d4a72f23 TC |
5180 | "Max bytes in flight per leaf vdev for scrubs and resilvers"); |
5181 | ||
fdc2d303 | 5182 | ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW, |
03fdcb9a | 5183 | "Min millisecs to scrub per txg"); |
c409e464 | 5184 | |
fdc2d303 | 5185 | ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW, |
03fdcb9a | 5186 | "Min millisecs to obsolete per txg"); |
a1d477c2 | 5187 | |
fdc2d303 | 5188 | ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW, |
03fdcb9a | 5189 | "Min millisecs to free per txg"); |
c409e464 | 5190 | |
fdc2d303 | 5191 | ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW, |
03fdcb9a | 5192 | "Min millisecs to resilver per txg"); |
c409e464 | 5193 | |
03fdcb9a | 5194 | ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW, |
cef48f14 TC |
5195 | "Set to prevent scans from progressing"); |
5196 | ||
03fdcb9a MM |
5197 | ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW, |
5198 | "Set to disable scrub I/O"); | |
c409e464 | 5199 | |
03fdcb9a MM |
5200 | ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW, |
5201 | "Set to disable scrub prefetching"); | |
36283ca2 | 5202 | |
ab8d9c17 | 5203 | ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW, |
a1d477c2 | 5204 | "Max number of blocks freed in one txg"); |
ba5ad9a4 | 5205 | |
ab8d9c17 | 5206 | ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW, |
4fe3a842 MA |
5207 | "Max number of dedup blocks freed in one txg"); |
5208 | ||
03fdcb9a MM |
5209 | ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW, |
5210 | "Enable processing of the free_bpobj"); | |
d4a72f23 | 5211 | |
82732299 AM |
5212 | ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW, |
5213 | "Enable block statistics calculation during scrub"); | |
5214 | ||
fdc2d303 | 5215 | ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW, |
03fdcb9a | 5216 | "Fraction of RAM for scan hard limit"); |
d4a72f23 | 5217 | |
fdc2d303 | 5218 | ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW, |
7ada752a | 5219 | "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size"); |
d4a72f23 | 5220 | |
03fdcb9a MM |
5221 | ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW, |
5222 | "Scrub using legacy non-sequential method"); | |
d4a72f23 | 5223 | |
fdc2d303 | 5224 | ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW, |
d4a72f23 TC |
5225 | "Scan progress on-disk checkpointing interval"); |
5226 | ||
ab8d9c17 | 5227 | ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW, |
63f88c12 | 5228 | "Max gap in bytes between sequential scrub / resilver I/Os"); |
5229 | ||
fdc2d303 | 5230 | ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW, |
d4a72f23 TC |
5231 | "Fraction of hard limit used as soft limit"); |
5232 | ||
03fdcb9a | 5233 | ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW, |
d4a72f23 TC |
5234 | "Tunable to attempt to reduce lock contention"); |
5235 | ||
fdc2d303 | 5236 | ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW, |
d4a72f23 | 5237 | "Tunable to adjust bias towards more filled segments during scans"); |
80a91e74 | 5238 | |
c85ac731 BB |
5239 | ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW, |
5240 | "Tunable to report resilver performance over the last N txgs"); | |
5241 | ||
03fdcb9a | 5242 | ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW, |
80a91e74 | 5243 | "Process all resilvers immediately"); |
482eeef8 GA |
5244 | |
5245 | ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, U64, ZMOD_RW, | |
5246 | "Error blocks to be scrubbed in one txg"); | |
5247 | /* END CSTYLED */ |