/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#include <sys/dbuf.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/Os issued, since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */
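
/*
 * Editorial sketch (illustration only, not upstream code): the mutual
 * exclusion described above amounts to a per-spa_sync() decision of
 * roughly this shape, where both predicates are simplified stand-ins
 * for the checks that dsl_scan_sync() actually performs:
 *
 *	if (all_metadata_scanned || queues_near_memory_limit)
 *		scan_io_queues_run(scn);	-- issue sorted, queued I/O
 *	else
 *		dsl_scan_visit(scn, tx);	-- keep scanning metadata
 *
 * Within a single txg the scan therefore either discovers new blocks or
 * issues previously queued ones, never both.
 */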

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(spa_t *spa);
static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);

extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;

/*
 * 'zpool status' uses bytes processed per pass to report throughput and
 * estimate time remaining. We define a pass to start when the scanning
 * phase completes for a sequential resilver. Optionally, this value
 * may be used to reset the pass statistics every N txgs to provide an
 * estimated completion time based on currently observed performance.
 */
static uint_t zfs_scan_report_txgs = 0;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
static int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of concurrently in-flight bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues, which would cause long
 * latency and therefore long txg sync times. No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
static uint64_t zfs_scan_vdev_limit = 16 << 20;
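
/*
 * Worked example (disk count assumed; see the computation of
 * scn_maxinflight_bytes in dsl_scan_init() below): with the default 16MB
 * limit above and, say, 8 data disks, the pool-wide in-flight cap for the
 * scanning phase comes out to MIN(arc_c_max / 4, MAX(1MB, 16MB * 8)),
 * i.e. 128MB whenever arc_c_max / 4 is at least that large.
 */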

static uint_t zfs_scan_issue_strategy = 0;

/* don't queue & sort zios, go direct */
static int zfs_scan_legacy = B_FALSE;
static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */

/* fraction of physmem */
static uint_t zfs_scan_mem_lim_fact = 20;

/* fraction of mem lim above */
static uint_t zfs_scan_mem_lim_soft_fact = 20;
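
/*
 * A minimal sketch, assuming dsl_scan_should_clear() derives its limits
 * from the tunables above as described: roughly,
 *
 *	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
 *	    zfs_scan_mem_lim_min);
 *	mlim_soft = MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
 *	    zfs_scan_mem_lim_soft_max);
 *
 * With the defaults, the hard limit is 1/20th (5%) of physical memory
 * (but no less than zfs_scan_mem_lim_min) and the soft limit is 1/20th
 * of the hard limit (but no more than zfs_scan_mem_lim_soft_max).
 */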

/* minimum milliseconds to scrub per txg */
static uint_t zfs_scrub_min_time_ms = 1000;

/* minimum milliseconds to obsolete per txg */
static uint_t zfs_obsolete_min_time_ms = 500;

/* minimum milliseconds to free per txg */
static uint_t zfs_free_min_time_ms = 1000;

/* minimum milliseconds to resilver per txg */
static uint_t zfs_resilver_min_time_ms = 3000;

static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
static uint64_t zfs_max_async_dedup_frees = 100000;

/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool.
 */
#define	SCAN_IMPORT_WAIT_TXGS		5

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

/*
 * Enable/disable the processing of the free_bpobj object.
 */
static int zfs_free_bpobj_enabled = 1;

/* Error blocks to be scrubbed in one txg. */
static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
 * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_nr_dvas;

	/* fields from zio_t */
	uint32_t		sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node;	/* link into issuing queue */
		list_node_t	sio_list_node;	/* link for issuing to disk */
	} sio_nodes;

	/*
	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
	 * depending on how many were in the original bp. Only the
	 * first DVA is really used for sorting and issuing purposes.
	 * The other DVAs (if provided) simply exist so that the zio
	 * layer can find additional copies to repair from in the
	 * event of an error. This array must go at the end of the
	 * struct to allow this for the variable number of elements.
	 */
	dva_t			sio_dva[];
} scan_io_t;

#define	SIO_SET_OFFSET(sio, x)	DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define	SIO_SET_ASIZE(sio, x)	DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define	SIO_GET_OFFSET(sio)	DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define	SIO_GET_ASIZE(sio)	DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define	SIO_GET_END_OFFSET(sio) \
	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define	SIO_GET_MUSED(sio) \
	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
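
/*
 * Editorial example (not upstream code): per the struct comment above, a
 * scan_io_t carries only the DVAs a block actually has, so for a
 * single-copy block SIO_GET_MUSED() evaluates to sizeof (scan_io_t) +
 * 1 * sizeof (dva_t) rather than the fixed three-DVA footprint of a full
 * blkptr_t. This per-sio size is what q_sio_memused (below) accumulates
 * when enforcing the memory limits discussed in dsl_scan_should_clear().
 */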

struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
	vdev_t		*q_vd; /* top-level vdev that this queue represents */
	zio_t		*q_zio; /* scn_zio_root child for waiting on IO */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	zfs_btree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;
	uint64_t	q_sio_memused;
	uint64_t	q_last_ext_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};

/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];

/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}

/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
	ASSERT3U(nr_dvas, >, 0);
	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);

	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
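
/*
 * Usage sketch (illustrative only): a block pointer with two DVAs gets
 * its scan_io_t from sio_cache[1], e.g.
 *
 *	scan_io_t *sio = sio_alloc(2);
 *	sio->sio_nr_dvas = 2;		-- normally filled in by bp2sio()
 *	...
 *	sio_free(sio);
 *
 * Each cache's object size is sizeof (scan_io_t) plus room for exactly
 * that many trailing dva_t entries; see scan_init() below.
 */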

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions. Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		char name[36];

		(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
		sio_cache[i] = kmem_cache_create(name,
		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
		    0, NULL, NULL, NULL, NULL, NULL, 0);
	}
}

void
scan_fini(void)
{
	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
		kmem_cache_destroy(sio_cache[i]);
	}
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
	memset(bp, 0, sizeof (*bp));
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;

	ASSERT3U(sio->sio_nr_dvas, >, 0);
	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);

	memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
	sio->sio_nr_dvas = BP_GET_NDVAS(bp);

	/*
	 * Copy the DVAs to the sio. We need all copies of the block so
	 * that the self healing code can use the alternate copies if the
	 * first is corrupted. We want the DVA at index dva_i to be first
	 * in the sio since this is the primary one that we want to issue.
	 */
	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
	}
}
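
/*
 * Worked example of the rotation above (editorial note, not upstream
 * code): for a bp with three DVAs and dva_i == 1, the loop copies
 * blk_dva[1], blk_dva[2], then blk_dva[0] into sio_dva[0..2]. The copy
 * being scanned is thus sorted and issued first, while the remaining
 * copies stay available to the self healing code.
 */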

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	/*
	 * Calculate the max number of in-flight bytes for pool-wide
	 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
	 * Limits for the issuing phase are done per top-level vdev and
	 * are handled separately.
	 */
	scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
	    zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));

	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress for %s; "
		    "restarting new-style scrub in txg %llu",
		    spa->spa_name,
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
		    ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);

		if (err != 0 && err != ENOENT)
			return (err);

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);

		/*
		 * Detect if the pool contains the signature of #2094. If it
		 * does properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				memcpy(&scn->scn_phys, zaptmp,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}

		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned. We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined -
		    scn->scn_phys.scn_skipped;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub for %s was modified "
			    "by old software; restarting in txg %llu",
			    spa->spa_name,
			    (longlong_t)scn->scn_restart_txg);
		} else if (dsl_scan_resilvering(dp)) {
			/*
			 * If a resilver is in progress and there are already
			 * errors, restart it instead of finishing this scan and
			 * then restarting it. If there haven't been any errors
			 * then remember that the incore DTL is valid.
			 */
			if (scn->scn_phys.scn_errors > 0) {
				scn->scn_restart_txg = txg;
				zfs_dbgmsg("resilver can't excise DTL_MISSING "
				    "when finished; restarting on %s in txg "
				    "%llu",
				    spa->spa_name,
				    (u_longlong_t)scn->scn_restart_txg);
			} else {
				/* it's safe to excise DTL when finished */
				spa->spa_scrub_started = B_TRUE;
			}
		}
	}

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	vdev_scan_stat_init(spa->spa_root_vdev);

	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);

		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		scan_ds_prefetch_queue_clear(scn);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
	return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
	    (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
{
	dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;

	return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
	    errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
}

boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
{
	return (dsl_errorscrubbing(scn->scn_dp) &&
	    scn->errorscrub_phys.dep_paused_flags);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	scn->errorscrub_phys.dep_cursor =
	    zap_cursor_serialize(&scn->errorscrub_cursor);

	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
	    &scn->errorscrub_phys, tx));
}

static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(!dsl_errorscrubbing(scn->scn_dp));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);

	memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
	scn->errorscrub_phys.dep_func = *funcp;
	scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
	scn->errorscrub_phys.dep_start_time = gethrestime_sec();
	scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
	scn->errorscrub_phys.dep_examined = 0;
	scn->errorscrub_phys.dep_errors = 0;
	scn->errorscrub_phys.dep_cursor = 0;
	zap_cursor_init_serialized(&scn->errorscrub_cursor,
	    spa->spa_meta_objset, spa->spa_errlog_last,
	    scn->errorscrub_phys.dep_cursor);

	vdev_config_dirty(spa->spa_root_vdev);
	spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);

	dsl_errorscrub_sync_state(scn, tx);

	spa_history_log_internal(spa, "error scrub setup", tx,
	    "func=%u mintxg=%u maxtxg=%llu",
	    *funcp, 0, (u_longlong_t)tx->tx_txg);
}

static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
		return (SET_ERROR(EBUSY));
	}

	if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
		return (ECANCELED);
	}
	return (0);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_queues_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
	if (scn->scn_queues_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
			    NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		memcpy(&scn->scn_phys_cached, &scn->scn_phys,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint for %s",
			    spa->spa_name);

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}

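/*
 * Summary of the dispatch above, derived from the code (illustrative):
 *
 *	sync_type       queues pending == 0    queues pending != 0
 *	---------       -------------------    -------------------
 *	SYNC_OPTIONAL   write scn_phys         write nothing
 *	SYNC_MANDATORY  write scn_phys         ASSERT trips (illegal)
 *	SYNC_CACHED     write scn_phys         write scn_phys_cached
 */
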
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
	    dsl_errorscrubbing(scn->scn_dp))
		return (SET_ERROR(EBUSY));

	return (0);
}

void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));

	/*
	 * If we are starting a fresh scrub, we erase the error scrub
	 * information from disk.
	 */
	memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
	dsl_errorscrub_sync_state(scn, tx);

	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);
	vdev_scan_stat_init(spa->spa_root_vdev);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			nvlist_t *aux = fnvlist_alloc();
			fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
			    "healing");
			spa_event_notify(spa, NULL, aux,
			    ESC_ZFS_RESILVER_START);
			nvlist_free(aux);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

		/*
		 * When starting a resilver clear any existing rebuild state.
		 * This is required to prevent stale rebuild status from
		 * being reported when a rebuild is run, then a resilver and
		 * finally a scrub. In which case only the scrub status
		 * should be reported by 'zpool status'.
		 */
		if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
			vdev_t *rvd = spa->spa_root_vdev;
			for (uint64_t i = 0; i < rvd->vdev_children; i++) {
				vdev_t *vd = rvd->vdev_child[i];
				vdev_rebuild_clear_sync(
				    (void *)(uintptr_t)vd->vdev_id, tx);
			}
		}
	}

	/* back to the generic stuff */

	if (zfs_scan_blkstats) {
		if (dp->dp_blkstats == NULL) {
			dp->dp_blkstats =
			    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		}
		memset(&dp->dp_blkstats->zab_type, 0,
		    sizeof (dp->dp_blkstats->zab_type));
	} else {
		if (dp->dp_blkstats) {
			vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
			dp->dp_blkstats = NULL;
		}
	}

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
	    (u_longlong_t)scn->scn_phys.scn_max_txg);
}

/*
 * Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
 * error scrub or resilver. Can also be called to resume a paused scrub or
 * error scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_RESILVER) {
		dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
		return (0);
	}

	if (func == POOL_SCAN_ERRORSCRUB) {
		if (dsl_errorscrub_is_paused(dp->dp_scan)) {
			/*
			 * got error scrub start cmd, resume paused error scrub.
			 */
			int err = dsl_scrub_set_pause_resume(scn->scn_dp,
			    POOL_SCRUB_NORMAL);
			if (err == 0) {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_ERRORSCRUB_RESUME);
				return (ECANCELED);
			}
			return (SET_ERROR(err));
		}

		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
		    &func, 0, ZFS_SPACE_CHECK_RESERVED));
	}

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (SET_ERROR(ECANCELED));
		}
		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	if (complete) {
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
		spa_history_log_internal(spa, "error scrub done", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	} else {
		spa_history_log_internal(spa, "error scrub canceled", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	}

	scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
	spa->spa_scrub_active = B_FALSE;
	spa_errlog_rotate(spa);
	scn->errorscrub_phys.dep_end_time = gethrestime_sec();
	zap_cursor_fini(&scn->errorscrub_cursor);

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_errorscrubbing(scn->scn_dp));
}

static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);
	scan_ds_prefetch_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;

	spa_notify_waiters(spa);

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);

			if (scn->scn_phys.scn_min_txg) {
				nvlist_t *aux = fnvlist_alloc();
				fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
				    "healing");
				spa_event_notify(spa, NULL, aux,
				    ESC_ZFS_RESILVER_FINISH);
				nvlist_free(aux);
			} else {
				spa_event_notify(spa, NULL, NULL,
				    ESC_ZFS_SCRUB_FINISH);
			}
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE, B_FALSE);
		}
		spa_errlog_rotate(spa);

		/*
		 * Don't clear flag until after vdev_dtl_reassess to ensure that
		 * DTL_MISSING will get updated when possible.
		 */
		spa->spa_scrub_started = B_FALSE;

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

		/*
		 * Clear any resilver_deferred flags in the config.
		 * If there are drives that need resilvering, kick
		 * off an asynchronous request to start resilver.
		 * vdev_clear_resilver_deferred() may update the config
		 * before the resilver can restart. In the event of
		 * a crash during this period, the spa loading code
		 * will find the drives that need to be resilvered
		 * and start the resilver then.
		 */
		if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
		    vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
			spa_history_log_internal(spa,
			    "starting deferred resilver", tx, "errors=%llu",
			    (u_longlong_t)spa_approx_errlog_size(spa));
			spa_async_request(spa, SPA_ASYNC_RESILVER);
		}

		/* Clear recent error events (i.e. duplicate events tracking) */
		if (complete)
			zfs_ereport_clear(spa, NULL);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;

	ASSERT(!dsl_scan_is_running(scn));
}

static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/*
		 * can't pause an error scrub when there is no in-progress
		 * error scrub.
		 */
		if (!dsl_errorscrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused error scrub */
		if (dsl_errorscrub_is_paused(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
		scn->errorscrub_phys.dep_paused_flags = B_TRUE;
		dsl_errorscrub_sync_state(scn, tx);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_errorscrub_is_paused(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the error scrub
			 * rate shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_errorscrub_spent_paused +=
			    gethrestime_sec() -
			    spa->spa_scan_pass_errorscrub_pause;

			spa->spa_scan_pass_errorscrub_pause = 0;
			scn->errorscrub_phys.dep_paused_flags = B_FALSE;

			zap_cursor_init_serialized(
			    &scn->errorscrub_cursor,
			    spa->spa_meta_objset, spa->spa_errlog_last,
			    scn->errorscrub_phys.dep_cursor);

			dsl_errorscrub_sync_state(scn, tx);
		}
	}
}

static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	/* can't cancel an error scrub when there is none in progress */
	if (!dsl_errorscrubbing(scn->scn_dp))
		return (SET_ERROR(ENOENT));
	return (0);
}

static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_errorscrub_done(scn, B_FALSE, tx);
	dsl_errorscrub_sync_state(scn, tx);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
	    ESC_ZFS_ERRORSCRUB_ABORT);
}

static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	(void) arg;
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	if (dsl_errorscrubbing(dp)) {
		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
		    NULL, 3, ZFS_SPACE_CHECK_RESERVED));
	}
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* record when this pass of the scrub was paused */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
		spa_notify_waiters(spa);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so.
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	if (dsl_errorscrubbing(dp)) {
		return (dsl_sync_task(spa_name(dp->dp_spa),
		    dsl_errorscrub_pause_resume_check,
		    dsl_errorscrub_pause_resume_sync, &cmd, 3,
		    ZFS_SPACE_CHECK_RESERVED));
	}
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}

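/*
 * Editor's note, a minimal usage sketch (not part of the build): this is
 * the entry point behind `zpool scrub -p` (pause) and a later plain
 * `zpool scrub` (resume); assuming the usual call chain, callers reach it
 * via spa_scrub_pause_resume():
 *
 *	dsl_scrub_set_pause_resume(dp, POOL_SCRUB_PAUSE);	// pause
 *	dsl_scrub_set_pause_resume(dp, POOL_SCRUB_NORMAL);	// resume
 *
 * Per dsl_scrub_pause_resume_check() above, pausing with no scrub running
 * fails with ENOENT, and pausing an already-paused scrub fails with EBUSY.
 */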

/* start a new scan, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver for %s at txg=%llu",
	    dp->dp_spa->spa_name, (longlong_t)txg);
}

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}

static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;
	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}

static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}

static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}

static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}

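/*
 * Editor's note, a minimal usage sketch (not part of the build): the
 * in-memory scan queue is an AVL tree keyed only by dataset object
 * number, so requeueing a dataset under a different object (as the
 * snapshot/clone hooks later in this file do) looks like:
 *
 *	uint64_t mintxg;
 *	if (scan_ds_queue_contains(scn, old_dsobj, &mintxg)) {
 *		scan_ds_queue_remove(scn, old_dsobj);
 *		scan_ds_queue_insert(scn, new_dsobj, mintxg);
 *	}
 *
 * scan_ds_queue_insert() VERIFYs that the key is absent, so an existing
 * entry must always be removed before its replacement is inserted.
 */
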
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_queues_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}

/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
 */
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t alloc, mlim_hard, mlim_soft, mused;

	alloc = metaslab_class_get_alloc(spa_normal_class(spa));
	alloc += metaslab_class_get_alloc(spa_special_class(spa));
	alloc += metaslab_class_get_alloc(spa_dedup_class(spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/*
			 * # of extents in exts_by_addr = # in exts_by_size.
			 * B-tree efficiency is ~75%, but can be as low as 50%.
			 */
			mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
			    ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
			    3 / 2) + queue->q_sio_memused;
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_queues_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}

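/*
 * Editor's note, a worked example (not part of the build), assuming the
 * default tunables and a hypothetical machine with 16 GiB of RAM and
 * 10 TiB allocated in the pool:
 *
 *	mlim_hard = MAX(16 GiB / zfs_scan_mem_lim_fact (20), 16 MiB)
 *		  = ~819 MiB
 *	mlim_hard = MIN(~819 MiB, 10 TiB / 20 = 512 GiB) = ~819 MiB
 *	mlim_soft = ~819 MiB - MIN(~819 MiB / 20 = ~41 MiB, 128 MiB)
 *		  = ~778 MiB
 *
 * So metadata scanning would stop once queue memory reaches ~819 MiB and
 * resume once issuing drains the queues below ~778 MiB.
 */
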
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 and objset blocks. */
	if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	uint64_t dirty_min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
			dprintf("suspending at first available bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    zb->zb_objset, 0, 0, 0);
		} else if (zb != NULL) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
#ifdef ZFS_DEBUG
			dsl_scan_phys_t *scnp = &scn->scn_phys;
			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
		}
		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

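/*
 * Editor's note, a worked example (not part of the build), assuming the
 * default tunables and a hypothetical zfs_dirty_data_max of 4 GiB:
 *
 *	mintime = 1000 ms for a scrub, 3000 ms for a resilver
 *	dirty_min_bytes = 4 GiB * 30 / 100 = ~1.2 GiB
 *
 * Once mintime of scanning has elapsed in this txg, the scan suspends as
 * soon as pool-wide dirty data reaches ~1.2 GiB, someone is explicitly
 * waiting on the txg, or zfs_txg_timeout (5 sec) has passed since the
 * sync started.
 */
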
static boolean_t
dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/*
	 * We suspend if:
	 *  - we have scrubbed for at least the minimum time (default 1 sec
	 *    for error scrub), someone is explicitly waiting for this txg
	 *    to complete, or we have used up all of the time in the txg
	 *    timeout (default 5 sec).
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
	uint64_t curr_time_ns = gethrtime();
	uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	int mintime = zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("error scrub suspending at bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
		}
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	(void) zilog;
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	ASSERT(!BP_IS_REDACTED(bp));
	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	(void) zilog;
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		const lr_write_t *lr = (const lr_write_t *)lrc;
		const blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		ASSERT(!BP_IS_REDACTED(bp));
		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	ASSERT(spa_writeable(dp->dp_spa));

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0)
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg, B_FALSE);

	zil_free(zilog);
}

/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;

	return (zbookmark_compare(spc_a->spc_datablkszsec,
	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}

428870ff | 1819 | |
d4a72f23 | 1820 | static void |
dd66857d | 1821 | scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag) |
d4a72f23 | 1822 | { |
424fd7c3 TS |
1823 | if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) { |
1824 | zfs_refcount_destroy(&spc->spc_refcnt); | |
d4a72f23 TC |
1825 | kmem_free(spc, sizeof (scan_prefetch_ctx_t)); |
1826 | } | |
1827 | } | |
1828 | ||
1829 | static scan_prefetch_ctx_t * | |
dd66857d | 1830 | scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag) |
d4a72f23 TC |
1831 | { |
1832 | scan_prefetch_ctx_t *spc; | |
1833 | ||
1834 | spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP); | |
424fd7c3 | 1835 | zfs_refcount_create(&spc->spc_refcnt); |
c13060e4 | 1836 | zfs_refcount_add(&spc->spc_refcnt, tag); |
d4a72f23 TC |
1837 | spc->spc_scn = scn; |
1838 | if (dnp != NULL) { | |
1839 | spc->spc_datablkszsec = dnp->dn_datablkszsec; | |
1840 | spc->spc_indblkshift = dnp->dn_indblkshift; | |
1841 | spc->spc_root = B_FALSE; | |
1842 | } else { | |
1843 | spc->spc_datablkszsec = 0; | |
1844 | spc->spc_indblkshift = 0; | |
1845 | spc->spc_root = B_TRUE; | |
1846 | } | |
1847 | ||
1848 | return (spc); | |
1849 | } | |
1850 | ||
1851 | static void | |
dd66857d | 1852 | scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag) |
d4a72f23 | 1853 | { |
c13060e4 | 1854 | zfs_refcount_add(&spc->spc_refcnt, tag); |
d4a72f23 TC |
1855 | } |
1856 | ||
d6496040 TC |
1857 | static void |
1858 | scan_ds_prefetch_queue_clear(dsl_scan_t *scn) | |
1859 | { | |
1860 | spa_t *spa = scn->scn_dp->dp_spa; | |
1861 | void *cookie = NULL; | |
1862 | scan_prefetch_issue_ctx_t *spic = NULL; | |
1863 | ||
1864 | mutex_enter(&spa->spa_scrub_lock); | |
1865 | while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue, | |
1866 | &cookie)) != NULL) { | |
1867 | scan_prefetch_ctx_rele(spic->spic_spc, scn); | |
1868 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1869 | } | |
1870 | mutex_exit(&spa->spa_scrub_lock); | |
1871 | } | |
1872 | ||
d4a72f23 TC |
1873 | static boolean_t |
1874 | dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc, | |
1875 | const zbookmark_phys_t *zb) | |
1876 | { | |
1877 | zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark; | |
1878 | dnode_phys_t tmp_dnp; | |
1879 | dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp; | |
1880 | ||
1881 | if (zb->zb_objset != last_zb->zb_objset) | |
1882 | return (B_TRUE); | |
1883 | if ((int64_t)zb->zb_object < 0) | |
1884 | return (B_FALSE); | |
1885 | ||
1886 | tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec; | |
1887 | tmp_dnp.dn_indblkshift = spc->spc_indblkshift; | |
1888 | ||
1889 | if (zbookmark_subtree_completed(dnp, zb, last_zb)) | |
1890 | return (B_TRUE); | |
1891 | ||
1892 | return (B_FALSE); | |
1893 | } | |
1894 | ||
1895 | static void | |
1896 | dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) | |
1897 | { | |
1898 | avl_index_t idx; | |
1899 | dsl_scan_t *scn = spc->spc_scn; | |
1900 | spa_t *spa = scn->scn_dp->dp_spa; | |
1901 | scan_prefetch_issue_ctx_t *spic; | |
1902 | ||
30af21b0 | 1903 | if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp)) |
d4a72f23 TC |
1904 | return; |
1905 | ||
1906 | if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg || | |
1907 | (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && | |
1908 | BP_GET_TYPE(bp) != DMU_OT_OBJSET)) | |
1909 | return; | |
1910 | ||
1911 | if (dsl_scan_check_prefetch_resume(spc, zb)) | |
1912 | return; | |
1913 | ||
1914 | scan_prefetch_ctx_add_ref(spc, scn); | |
1915 | spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP); | |
1916 | spic->spic_spc = spc; | |
1917 | spic->spic_bp = *bp; | |
1918 | spic->spic_zb = *zb; | |
1919 | ||
1920 | /* | |
1921 | * Add the IO to the queue of blocks to prefetch. This allows us to | |
1922 | * prioritize blocks that we will need first for the main traversal | |
1923 | * thread. | |
1924 | */ | |
1925 | mutex_enter(&spa->spa_scrub_lock); | |
1926 | if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) { | |
1927 | /* this block is already queued for prefetch */ | |
1928 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
1929 | scan_prefetch_ctx_rele(spc, scn); | |
1930 | mutex_exit(&spa->spa_scrub_lock); | |
1931 | return; | |
1932 | } | |
1933 | ||
1934 | avl_insert(&scn->scn_prefetch_queue, spic, idx); | |
1935 | cv_broadcast(&spa->spa_scrub_io_cv); | |
1936 | mutex_exit(&spa->spa_scrub_lock); | |
1937 | } | |
1938 | ||
1939 | static void | |
1940 | dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp, | |
1941 | uint64_t objset, uint64_t object) | |
1942 | { | |
1943 | int i; | |
1944 | zbookmark_phys_t zb; | |
1945 | scan_prefetch_ctx_t *spc; | |
1946 | ||
1947 | if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) | |
1948 | return; | |
1949 | ||
1950 | SET_BOOKMARK(&zb, objset, object, 0, 0); | |
1951 | ||
1952 | spc = scan_prefetch_ctx_create(scn, dnp, FTAG); | |
1953 | ||
1954 | for (i = 0; i < dnp->dn_nblkptr; i++) { | |
1955 | zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]); | |
1956 | zb.zb_blkid = i; | |
1957 | dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb); | |
1958 | } | |
1959 | ||
1960 | if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { | |
1961 | zb.zb_level = 0; | |
1962 | zb.zb_blkid = DMU_SPILL_BLKID; | |
1963 | dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb); | |
1964 | } | |
1965 | ||
1966 | scan_prefetch_ctx_rele(spc, FTAG); | |
1967 | } | |
1968 | ||
65c7cc49 | 1969 | static void |
d4a72f23 TC |
1970 | dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, |
1971 | arc_buf_t *buf, void *private) | |
1972 | { | |
14e4e3cb | 1973 | (void) zio; |
d4a72f23 TC |
1974 | scan_prefetch_ctx_t *spc = private; |
1975 | dsl_scan_t *scn = spc->spc_scn; | |
1976 | spa_t *spa = scn->scn_dp->dp_spa; | |
1977 | ||
13a2ff27 | 1978 | /* broadcast that the IO has completed for rate limiting purposes */ |
d4a72f23 TC |
1979 | mutex_enter(&spa->spa_scrub_lock); |
1980 | ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); | |
1981 | spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); | |
1982 | cv_broadcast(&spa->spa_scrub_io_cv); | |
1983 | mutex_exit(&spa->spa_scrub_lock); | |
1984 | ||
1985 | /* if there was an error or we are done prefetching, just cleanup */ | |
13a2ff27 | 1986 | if (buf == NULL || scn->scn_prefetch_stop) |
d4a72f23 TC |
1987 | goto out; |
1988 | ||
1989 | if (BP_GET_LEVEL(bp) > 0) { | |
1990 | int i; | |
1991 | blkptr_t *cbp; | |
1992 | int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; | |
1993 | zbookmark_phys_t czb; | |
1994 | ||
1995 | for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { | |
1996 | SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, | |
1997 | zb->zb_level - 1, zb->zb_blkid * epb + i); | |
1998 | dsl_scan_prefetch(spc, cbp, &czb); | |
1999 | } | |
2000 | } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { | |
2001 | dnode_phys_t *cdnp; | |
2002 | int i; | |
2003 | int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; | |
2004 | ||
2005 | for (i = 0, cdnp = buf->b_data; i < epb; | |
2006 | i += cdnp->dn_extra_slots + 1, | |
2007 | cdnp += cdnp->dn_extra_slots + 1) { | |
2008 | dsl_scan_prefetch_dnode(scn, cdnp, | |
2009 | zb->zb_objset, zb->zb_blkid * epb + i); | |
2010 | } | |
2011 | } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { | |
2012 | objset_phys_t *osp = buf->b_data; | |
2013 | ||
2014 | dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode, | |
2015 | zb->zb_objset, DMU_META_DNODE_OBJECT); | |
2016 | ||
2017 | if (OBJSET_BUF_HAS_USERUSED(buf)) { | |
6db781d5 AM |
2018 | if (OBJSET_BUF_HAS_PROJECTUSED(buf)) { |
2019 | dsl_scan_prefetch_dnode(scn, | |
2020 | &osp->os_projectused_dnode, zb->zb_objset, | |
2021 | DMU_PROJECTUSED_OBJECT); | |
2022 | } | |
d4a72f23 TC |
2023 | dsl_scan_prefetch_dnode(scn, |
2024 | &osp->os_groupused_dnode, zb->zb_objset, | |
2025 | DMU_GROUPUSED_OBJECT); | |
2026 | dsl_scan_prefetch_dnode(scn, | |
2027 | &osp->os_userused_dnode, zb->zb_objset, | |
2028 | DMU_USERUSED_OBJECT); | |
2029 | } | |
2030 | } | |
2031 | ||
2032 | out: | |
2033 | if (buf != NULL) | |
2034 | arc_buf_destroy(buf, private); | |
2035 | scan_prefetch_ctx_rele(spc, scn); | |
2036 | } | |
2037 | ||
d4a72f23 TC |
2038 | static void |
2039 | dsl_scan_prefetch_thread(void *arg) | |
2040 | { | |
2041 | dsl_scan_t *scn = arg; | |
2042 | spa_t *spa = scn->scn_dp->dp_spa; | |
2043 | scan_prefetch_issue_ctx_t *spic; | |
2044 | ||
2045 | /* loop until we are told to stop */ | |
2046 | while (!scn->scn_prefetch_stop) { | |
2047 | arc_flags_t flags = ARC_FLAG_NOWAIT | | |
2048 | ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH; | |
2049 | int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; | |
2050 | ||
2051 | mutex_enter(&spa->spa_scrub_lock); | |
2052 | ||
2053 | /* | |
2054 | * Wait until we have an IO to issue and are not above our | |
2055 | * maximum in flight limit. | |
2056 | */ | |
2057 | while (!scn->scn_prefetch_stop && | |
2058 | (avl_numnodes(&scn->scn_prefetch_queue) == 0 || | |
2059 | spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) { | |
2060 | cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); | |
2061 | } | |
2062 | ||
2063 | /* recheck if we should stop since we waited for the cv */ | |
2064 | if (scn->scn_prefetch_stop) { | |
2065 | mutex_exit(&spa->spa_scrub_lock); | |
2066 | break; | |
2067 | } | |
2068 | ||
2069 | /* remove the prefetch IO from the tree */ | |
2070 | spic = avl_first(&scn->scn_prefetch_queue); | |
2071 | spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp); | |
2072 | avl_remove(&scn->scn_prefetch_queue, spic); | |
2073 | ||
2074 | mutex_exit(&spa->spa_scrub_lock); | |
2075 | ||
2076 | if (BP_IS_PROTECTED(&spic->spic_bp)) { | |
2077 | ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE || | |
2078 | BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET); | |
2079 | ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0); | |
2080 | zio_flags |= ZIO_FLAG_RAW; | |
2081 | } | |
2082 | ||
7d0df542 AM |
2083 | /* We don't need data L1 buffer since we do not prefetch L0. */ |
2084 | blkptr_t *bp = &spic->spic_bp; | |
2085 | if (BP_GET_LEVEL(bp) == 1 && BP_GET_TYPE(bp) != DMU_OT_DNODE && | |
2086 | BP_GET_TYPE(bp) != DMU_OT_OBJSET) | |
2087 | flags |= ARC_FLAG_NO_BUF; | |
2088 | ||
d4a72f23 | 2089 | /* issue the prefetch asynchronously */ |
7d0df542 AM |
2090 | (void) arc_read(scn->scn_zio_root, spa, bp, |
2091 | dsl_scan_prefetch_cb, spic->spic_spc, ZIO_PRIORITY_SCRUB, | |
2092 | zio_flags, &flags, &spic->spic_zb); | |
428870ff | 2093 | |
d4a72f23 | 2094 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); |
b5256303 TC |
2095 | } |
2096 | ||
d4a72f23 | 2097 | ASSERT(scn->scn_prefetch_stop); |
428870ff | 2098 | |
d4a72f23 TC |
2099 | /* free any prefetches we didn't get to complete */ |
2100 | mutex_enter(&spa->spa_scrub_lock); | |
2101 | while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) { | |
2102 | avl_remove(&scn->scn_prefetch_queue, spic); | |
2103 | scan_prefetch_ctx_rele(spic->spic_spc, scn); | |
2104 | kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); | |
2105 | } | |
2106 | ASSERT0(avl_numnodes(&scn->scn_prefetch_queue)); | |
2107 | mutex_exit(&spa->spa_scrub_lock); | |
428870ff BB |
2108 | } |
2109 | ||
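/*
 * Editor's note (not part of the build): the thread above is a bounded
 * consumer. It sleeps on spa_scrub_io_cv until work is queued and
 * in-flight bytes drop below scn_maxinflight_bytes, and
 * dsl_scan_prefetch_cb() broadcasts the same cv as reads complete, so
 * prefetch memory stays capped while the queue drains in bookmark order.
 */
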
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it, zero it out to indicate that it's OK
		 * to start checking for suspending again.
		 */
		if (zbookmark_subtree_tbd(dnp, zb,
		    &scn->scn_phys.scn_bookmark)) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

/*
 * Return nonzero on i/o error.
 */
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	ASSERT(!BP_IS_REDACTED(bp));

	/*
	 * There is an unlikely case of encountering dnodes with a
	 * contradicting dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag in
	 * files created or modified before commit 4254acb was merged. As
	 * it is not possible to know which of the two is correct, report
	 * an error.
	 */
	if (dnp != NULL &&
	    dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
		scn->scn_phys.scn_errors++;
		spa_log_error(spa, zb, &bp->blk_birth);
		return (SET_ERROR(EINVAL));
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		if (BP_IS_PROTECTED(bp)) {
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			zio_flags |= ZIO_FLAG_RAW;
		}

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group/project accounting
			 * objects, and never skip them, even if we are
			 * suspending. This is necessary so that the
			 * space deltas from this txg get integrated.
			 */
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				dsl_scan_visitdnode(scn, ds, osp->os_type,
				    &osp->os_projectused_dnode,
				    DMU_PROJECTUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (!zfs_blkptr_verify(spa, bp,
	    BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
		/*
		 * Sanity check the block pointer contents, this is handled
		 * by arc_read() for the cases above.
		 */
		scn->scn_phys.scn_errors++;
		spa_log_error(spa, zb, &bp->blk_birth);
		return (SET_ERROR(EINVAL));
	}

	return (0);
}

inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	blkptr_t *bp_toread = NULL;

	if (dsl_scan_check_suspend(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	scn->scn_visited_this_txg++;

	if (BP_IS_HOLE(bp)) {
		scn->scn_holes_this_txg++;
		return;
	}

	if (BP_IS_REDACTED(bp)) {
		ASSERT(dsl_dataset_feature_is_active(ds,
		    SPA_FEATURE_REDACTED_DATASETS));
		return;
	}

	/*
	 * Check if this block contradicts any filesystem flags.
	 */
	spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
	if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
		ASSERT(dsl_dataset_feature_is_active(ds, f));

	f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
	if (f != SPA_FEATURE_NONE)
		ASSERT(dsl_dataset_feature_is_active(ds, f));

	f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
	if (f != SPA_FEATURE_NONE)
		ASSERT(dsl_dataset_feature_is_active(ds, f));

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
		scn->scn_lt_min_this_txg++;
		return;
	}

	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	*bp_toread = *bp;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
		goto out;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		scn->scn_ddt_contained_this_txg++;
		goto out;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
		scn->scn_gt_max_this_txg++;
		goto out;
	}

	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);

out:
	kmem_free(bp_toread, sizeof (blkptr_t));
}

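/*
 * Editor's note, a worked example (not part of the build): for a 128 KiB
 * indirect block, epb = 128 KiB >> SPA_BLKPTRSHIFT = 1024 block pointers,
 * so dsl_scan_recurse() visits child bookmarks
 *
 *	(objset, object, level - 1, blkid * 1024 + i),  i = 0 .. 1023
 *
 * in order. This strict ordering is what lets dsl_scan_check_resume()
 * treat the saved bookmark as the boundary of a fully-completed subtree.
 */
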
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
		    zb.zb_objset, 0, 0, 0);
	} else {
		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
	}

	scn->scn_objsets_visited_this_txg++;

	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
	dsl_scan_prefetch(spc, bp, &zb);
	scan_prefetch_ctx_rele(spc, FTAG);

	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about.  However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn_phys->scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu on %s; currently "
			    "traversing; reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    ds->ds_dir->dd_pool->dp_spa->spa_name,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn_phys->scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu on %s; currently "
			    "traversing; reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object,
			    ds->ds_dir->dd_pool->dp_spa->spa_name);
		}
	}
}

/*
 * Invoked when a dataset is destroyed. We need to make sure that:
 *
 * 1) If it is the dataset that was currently being scanned, we write
 *	a new dsl_scan_phys_t and mark the objset reference in it
 *	as destroyed.
 * 2) Remove it from the work queue, if it was present.
 *
 * If the dataset was actually a snapshot, instead of marking the dataset
 * as destroyed, we substitute the next snapshot in line.
 */
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ds_destroyed_scn_phys(ds, &scn->scn_phys);
	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		if (ds->ds_is_snapshot)
			scan_ds_queue_insert(scn,
			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu on %s; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    dp->dp_spa->spa_name,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu on %s; in queue; "
			    "removing",
			    (u_longlong_t)ds->ds_object,
			    dp->dp_spa->spa_name);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds->ds_object) {
		scn_bookmark->zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    ds->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
}

/*
 * Called when a dataset is snapshotted. If we were currently traversing
 * this snapshot, we reset our bookmark to point at the newly created
 * snapshot. We also modify our work queue to remove the old snapshot and
 * replace with the new one.
 */
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (!dsl_scan_is_running(scn))
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);

	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
		scan_ds_queue_remove(scn, ds->ds_object);
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}

static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
    zbookmark_phys_t *scn_bookmark)
{
	if (scn_bookmark->zb_objset == ds1->ds_object) {
		scn_bookmark->zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    ds1->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
		scn_bookmark->zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    ds2->ds_dir->dd_pool->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}
}

2600 | /* | |
dd262c96 | 2601 | * Called when an origin dataset and its clone are swapped. If we were |
d4a72f23 | 2602 | * currently traversing the dataset, we need to switch to traversing the |
dd262c96 | 2603 | * newly promoted clone. |
d4a72f23 TC |
2604 | */ |
2605 | void | |
2606 | dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) | |
2607 | { | |
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg1, mintxg2;
	boolean_t ds1_queued, ds2_queued;

	if (!dsl_scan_is_running(scn))
		return;

	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);

	/*
	 * Handle the in-memory scan queue.
	 */
	ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
	ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * The swapping code below would not handle this case
		 * correctly, since we can't insert ds2 if it is already
		 * there. That's because scan_ds_queue_insert() prohibits
		 * a duplicate insert and panics.
		 */
	} else if (ds1_queued) {
		scan_ds_queue_remove(scn, ds1->ds_object);
		scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
	} else if (ds2_queued) {
		scan_ds_queue_remove(scn, ds2->ds_object);
		scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
	}

	/*
	 * Handle the on-disk scan queue.
	 * The on-disk state is an out-of-date version of the in-memory
	 * state, so the in-memory and on-disk values for ds1_queued and
	 * ds2_queued may be different. Therefore we need to apply the swap
	 * logic to the on-disk state independently of the in-memory state.
	 */
	ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
	ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;

	/* Sanity checking. */
	if (ds1_queued) {
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}
	if (ds2_queued) {
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
	}

	if (ds1_queued && ds2_queued) {
		/*
		 * If both are queued, we don't need to do anything.
		 * Alternatively, we could check for EEXIST from
		 * zap_add_int_key() and back out to the original state, but
		 * that would be more work than checking for this case
		 * upfront.
		 */
	} else if (ds1_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds2->ds_object);
	} else if (ds2_queued) {
		VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
		zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    dp->dp_spa->spa_name,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
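
/*
 * Summary of the swap cases handled above; the same table applies
 * independently to the in-memory queue and to the on-disk ZAP:
 *
 *	ds1 queued	ds2 queued	action
 *	----------	----------	----------------------------------
 *	no		no		nothing to do
 *	yes		no		move ds1's entry (and its mintxg) to ds2
 *	no		yes		move ds2's entry (and its mintxg) to ds1
 *	yes		yes		entries swap into each other; no-op
 */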

static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	uint64_t originobj = *(uint64_t *)arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *	cur_min_txg is no less than ds_creation_txg.
		 *	We have already visited these blocks.
		 * or
		 *	birth > scn_max_txg
		 *	The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    (longlong_t)dsobj, dsname,
		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
		    (longlong_t)scn->scn_phys.scn_max_txg);
		kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. In addition, $ORIGIN
	 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either. So we traverse the ZIL here,
	 * rather than in scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (!dsl_dataset_is_snapshot(ds) &&
	    (dp->dp_origin_snap == NULL ||
	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass on %s; visiting again",
		    dp->dp_spa->spa_name);
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		scan_ds_queue_insert(scn, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		scan_ds_queue_insert(scn,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    (void) zap_cursor_advance(&zc)) {
				scan_ds_queue_insert(scn,
				    zfs_strtonum(za.za_name, NULL),
				    dsl_dataset_phys(ds)->ds_creation_txg);
			}
			zap_cursor_fini(&zc);
		} else {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &ds->ds_object,
			    DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	(void) arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	scan_ds_queue_insert(scn, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	(void) tx;
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (!dsl_scan_is_running(scn))
		return;

	/*
	 * This function is special because it is the only thing
	 * that can add scan_io_t's to the vdev scan queues from
	 * outside dsl_scan_sync(). For the most part this is ok
	 * as long as it is called from within syncing context.
	 * However, dsl_scan_sync() expects that no new sio's will
	 * be added between when all the work for a scan is done
	 * and the next txg when the scan is actually marked as
	 * completed. This check ensures we do not issue new sio's
	 * during this period.
	 */
	if (scn->scn_done_txg != 0)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}
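
/*
 * Illustrative walk-through of the loop above for a hypothetical dedup
 * entry (the numbers are made up, not taken from any real pool):
 *
 *	ddp[0].ddp_phys_birth == 100, scn_max_txg == 500:
 *		ddt_bp_create() builds a blkptr_t with birth txg 100 and
 *		hands it to scan_funcs[scn_func] (scrub or resilver) with
 *		a zeroed bookmark, since DDT blocks have no dataset context.
 *	ddp[1].ddp_phys_birth == 0:
 *		slot is unallocated and is skipped.
 */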

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = {{{{0}}}};
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
	    "suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
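
/*
 * Sketch of the walk above (assuming the usual setting
 * scn_ddt_class_max == DDT_CLASS_DUPLICATE): ddt_walk() yields entries
 * with the bookmark's ddb_class nondecreasing, so DDT_CLASS_DITTO
 * entries are visited before DDT_CLASS_DUPLICATE ones, and the loop
 * breaks as soon as the bookmark reaches DDT_CLASS_UNIQUE. Those
 * refcnt == 1 blocks are left for the top-down phase described in the
 * block comment above.
 */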

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}
3047 | static void | |
3048 | dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) | |
3049 | { | |
d4a72f23 | 3050 | scan_ds_t *sds; |
428870ff | 3051 | dsl_pool_t *dp = scn->scn_dp; |
428870ff BB |
3052 | |
3053 | if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= | |
3054 | scn->scn_phys.scn_ddt_class_max) { | |
3055 | scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; | |
3056 | scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; | |
3057 | dsl_scan_ddt(scn, tx); | |
0ea05c64 | 3058 | if (scn->scn_suspending) |
428870ff BB |
3059 | return; |
3060 | } | |
3061 | ||
3062 | if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { | |
3063 | /* First do the MOS & ORIGIN */ | |
3064 | ||
3065 | scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; | |
3066 | scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; | |
3067 | dsl_scan_visit_rootbp(scn, NULL, | |
3068 | &dp->dp_meta_rootbp, tx); | |
0ea05c64 | 3069 | if (scn->scn_suspending) |
428870ff BB |
3070 | return; |
3071 | ||
3072 | if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { | |
13fe0198 | 3073 | VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, |
d4a72f23 | 3074 | enqueue_cb, NULL, DS_FIND_CHILDREN)); |
428870ff BB |
3075 | } else { |
3076 | dsl_scan_visitds(scn, | |
3077 | dp->dp_origin_snap->ds_object, tx); | |
3078 | } | |
0ea05c64 | 3079 | ASSERT(!scn->scn_suspending); |
428870ff BB |
3080 | } else if (scn->scn_phys.scn_bookmark.zb_objset != |
3081 | ZB_DESTROYED_OBJSET) { | |
d4a72f23 | 3082 | uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; |
428870ff | 3083 | /* |
d4a72f23 | 3084 | * If we were suspended, continue from here. Note if the |
0ea05c64 | 3085 | * ds we were suspended on was deleted, the zb_objset may |
428870ff BB |
3086 | * be -1, so we will skip this and find a new objset |
3087 | * below. | |
3088 | */ | |
d4a72f23 | 3089 | dsl_scan_visitds(scn, dsobj, tx); |
0ea05c64 | 3090 | if (scn->scn_suspending) |
428870ff BB |
3091 | return; |
3092 | } | |
3093 | ||
3094 | /* | |
d4a72f23 | 3095 | * In case we suspended right at the end of the ds, zero the |
428870ff BB |
3096 | * bookmark so we don't think that we're still trying to resume. |
3097 | */ | |
861166b0 | 3098 | memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t)); |
428870ff | 3099 | |
d4a72f23 TC |
3100 | /* |
3101 | * Keep pulling things out of the dataset avl queue. Updates to the | |
3102 | * persistent zap-object-as-queue happen only at checkpoints. | |
3103 | */ | |
3104 | while ((sds = avl_first(&scn->scn_queue)) != NULL) { | |
428870ff | 3105 | dsl_dataset_t *ds; |
d4a72f23 TC |
3106 | uint64_t dsobj = sds->sds_dsobj; |
3107 | uint64_t txg = sds->sds_txg; | |
428870ff | 3108 | |
d4a72f23 TC |
3109 | /* dequeue and free the ds from the queue */ |
3110 | scan_ds_queue_remove(scn, dsobj); | |
3111 | sds = NULL; | |
428870ff | 3112 | |
d4a72f23 | 3113 | /* set up min / max txg */ |
428870ff | 3114 | VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); |
d4a72f23 | 3115 | if (txg != 0) { |
428870ff | 3116 | scn->scn_phys.scn_cur_min_txg = |
d4a72f23 | 3117 | MAX(scn->scn_phys.scn_min_txg, txg); |
428870ff BB |
3118 | } else { |
3119 | scn->scn_phys.scn_cur_min_txg = | |
3120 | MAX(scn->scn_phys.scn_min_txg, | |
d683ddbb | 3121 | dsl_dataset_phys(ds)->ds_prev_snap_txg); |
428870ff BB |
3122 | } |
3123 | scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); | |
3124 | dsl_dataset_rele(ds, FTAG); | |
3125 | ||
3126 | dsl_scan_visitds(scn, dsobj, tx); | |
0ea05c64 | 3127 | if (scn->scn_suspending) |
d4a72f23 | 3128 | return; |
428870ff | 3129 | } |
d4a72f23 TC |
3130 | |
3131 | /* No more objsets to fetch, we're done */ | |
3132 | scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; | |
3133 | ASSERT0(scn->scn_suspending); | |
3134 | } | |

static uint64_t
dsl_scan_count_data_disks(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t i, leaves = 0;

	for (i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];
		if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
			continue;
		leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
	}
	return (leaves);
}

static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
	int i;
	uint64_t cur_size = 0;

	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
	}

	q->q_total_zio_size_this_txg += cur_size;
	q->q_zios_this_txg++;
}

static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
    uint64_t end)
{
	q->q_total_seg_size_this_txg += end - start;
	q->q_segs_this_txg++;
}

static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
	/* See comment in dsl_scan_check_suspend() */
	uint64_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;
	uint64_t dirty_min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	return ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}
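
/*
 * Worked example of the dirty-data check above, using illustrative
 * values (not necessarily the defaults on any given system): with
 * zfs_dirty_data_max == 4 GiB and
 * zfs_vdev_async_write_active_min_dirty_percent == 30,
 * dirty_min_bytes == 1.2 GiB, so once dp_dirty_total reaches that
 * level (and the queue has already run for at least mintime
 * milliseconds), issuing is suspended for this txg.
 */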

/*
 * Given a list of scan_io_t's in io_list, this issues the I/Os out to
 * disk. This consumes the io_list and frees the scan_io_t's. This is
 * called when emptying queues, either when we're up against the memory
 * limit or when we have finished scanning. Returns B_TRUE if we stopped
 * processing the list before we finished. Any sios that were not issued
 * will remain in the io_list.
 */
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	boolean_t suspended = B_FALSE;

	while ((sio = list_head(io_list)) != NULL) {
		blkptr_t bp;

		if (scan_io_queue_check_suspend(scn)) {
			suspended = B_TRUE;
			break;
		}

		sio2bp(sio, &bp);
		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
		    &sio->sio_zb, queue);
		(void) list_remove_head(io_list);
		scan_io_queues_update_zio_stats(queue, &bp);
		sio_free(sio);
	}
	return (suspended);
}

/*
 * This function removes sios from an IO queue which reside within a given
 * range_seg_t and inserts them (in offset order) into a list. Note that
 * we only ever return a maximum of 32 sios at once. If there are more sios
 * to process within this segment that did not make it onto the list, we
 * return B_TRUE; otherwise we return B_FALSE.
 */
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
	scan_io_t *srch_sio, *sio, *next_sio;
	avl_index_t idx;
	uint_t num_sios = 0;
	int64_t bytes_issued = 0;

	ASSERT(rs != NULL);
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	srch_sio = sio_alloc(1);
	srch_sio->sio_nr_dvas = 1;
	SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));

	/*
	 * The exact start of the extent might not contain any matching zios,
	 * so if that's the case, examine the next one in the tree.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio == NULL)
		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);

	while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr) && num_sios <= 32) {
		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
		    queue->q_exts_by_addr));
		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
		    queue->q_exts_by_addr));

		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
		avl_remove(&queue->q_sios_by_addr, sio);
		if (avl_is_empty(&queue->q_sios_by_addr))
			atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		bytes_issued += SIO_GET_ASIZE(sio);
		num_sios++;
		list_insert_tail(list, sio);
		sio = next_sio;
	}

	/*
	 * We limit the number of sios we process at once to 32 to avoid
	 * biting off more than we can chew. If we didn't take everything
	 * in the segment we update it to reflect the work we were able to
	 * complete. Otherwise, we remove it from the range tree entirely.
	 */
	if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
	    queue->q_exts_by_addr)) {
		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
		    -bytes_issued);
		range_tree_resize_segment(queue->q_exts_by_addr, rs,
		    SIO_GET_OFFSET(sio), rs_get_end(rs,
		    queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
		queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
		return (B_TRUE);
	} else {
		uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
		uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
		range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
		queue->q_last_ext_addr = -1;
		return (B_FALSE);
	}
}
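
/*
 * Batching sketch for the gather function above, with made-up numbers:
 * an extent spanning [1 MiB, 5 MiB) that holds 40 queued sios is
 * drained in two calls. The first call moves an initial batch (bounded
 * by the num_sios check above) onto the list, resizes the extent to
 * begin at the first sio left behind, and returns B_TRUE; the second
 * call moves the rest, removes the extent from q_exts_by_addr, and
 * returns B_FALSE.
 */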

/*
 * This is called from the queue emptying thread and selects the next
 * extent from which we are to issue I/Os. The behavior of this function
 * depends on the state of the scan, the current memory consumption and
 * whether or not we are performing a scan shutdown.
 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
 *	needs to perform a checkpoint
 * 2) We select the largest available extent if we are up against the
 *	memory limit.
 * 3) Otherwise we don't select any extents.
 */
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	range_tree_t *rt = queue->q_exts_by_addr;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
	ASSERT(scn->scn_is_sorted);

	if (!scn->scn_checkpointing && !scn->scn_clearing)
		return (NULL);

	/*
	 * During normal clearing, we want to issue our largest segments
	 * first, keeping IO as sequential as possible, and leaving the
	 * smaller extents for later with the hope that they might eventually
	 * grow to larger sequential segments. However, when the scan is
	 * checkpointing, no new extents will be added to the sorting queue,
	 * so the way we are sorted now is as good as it will ever get.
	 * In this case, we instead switch to issuing extents in LBA order.
	 */
	if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
	    zfs_scan_issue_strategy == 1)
		return (range_tree_first(rt));

	/*
	 * Try to continue the previous extent if it is not completed yet.
	 * After the shrink in scan_io_queue_gather() it may no longer be
	 * the best, but otherwise we would leave a shorter remnant every
	 * txg.
	 */
	uint64_t start;
	uint64_t size = 1ULL << rt->rt_shift;
	range_seg_t *addr_rs;
	if (queue->q_last_ext_addr != -1) {
		start = queue->q_last_ext_addr;
		addr_rs = range_tree_find(rt, start, size);
		if (addr_rs != NULL)
			return (addr_rs);
	}

	/*
	 * Nothing to continue, so find a new best extent.
	 */
	uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
	if (v == NULL)
		return (NULL);
	queue->q_last_ext_addr = start = *v << rt->rt_shift;

	/*
	 * We need to get the original entry in the by_addr tree so we can
	 * modify it.
	 */
	addr_rs = range_tree_find(rt, start, size);
	ASSERT3P(addr_rs, !=, NULL);
	ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
	ASSERT3U(rs_get_end(addr_rs, rt), >, start);
	return (addr_rs);
}
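
/*
 * Summary of extent selection above, keyed by the zfs_scan_issue_strategy
 * module parameter:
 *
 *	0 (default)	size order (largest extent first) while clearing;
 *			LBA order once the scan begins checkpointing
 *	1		always LBA order
 *	2		always size order, even while checkpointing
 *
 * In the size-ordered cases, a partially drained extent recorded in
 * q_last_ext_addr is finished before a new "best" extent is chosen.
 */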

static void
scan_io_queues_run_one(void *arg)
{
	dsl_scan_io_queue_t *queue = arg;
	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
	boolean_t suspended = B_FALSE;
	range_seg_t *rs;
	scan_io_t *sio;
	zio_t *zio;
	list_t sio_list;

	ASSERT(queue->q_scn->scn_is_sorted);

	list_create(&sio_list, sizeof (scan_io_t),
	    offsetof(scan_io_t, sio_nodes.sio_list_node));
	zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
	    NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
	mutex_enter(q_lock);
	queue->q_zio = zio;

	/* Calculate maximum in-flight bytes for this vdev. */
	queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
	    (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));

	/* reset per-queue scan statistics for this txg */
	queue->q_total_seg_size_this_txg = 0;
	queue->q_segs_this_txg = 0;
	queue->q_total_zio_size_this_txg = 0;
	queue->q_zios_this_txg = 0;

	/* loop until we run out of time or sios */
	while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
		uint64_t seg_start = 0, seg_end = 0;
		boolean_t more_left;

		ASSERT(list_is_empty(&sio_list));

		/* loop while we still have sios left to process in this rs */
		do {
			scan_io_t *first_sio, *last_sio;

			/*
			 * We have selected which extent needs to be
			 * processed next. Gather up the corresponding sios.
			 */
			more_left = scan_io_queue_gather(queue, rs, &sio_list);
			ASSERT(!list_is_empty(&sio_list));
			first_sio = list_head(&sio_list);
			last_sio = list_tail(&sio_list);

			seg_end = SIO_GET_END_OFFSET(last_sio);
			if (seg_start == 0)
				seg_start = SIO_GET_OFFSET(first_sio);

			/*
			 * Issuing sios can take a long time so drop the
			 * queue lock. The sio queue won't be updated by
			 * other threads since we're in syncing context so
			 * we can be sure that our trees will remain exactly
			 * as we left them.
			 */
			mutex_exit(q_lock);
			suspended = scan_io_queue_issue(queue, &sio_list);
			mutex_enter(q_lock);

			if (suspended)
				break;
		} while (more_left);

		/* update statistics for debugging purposes */
		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);

		if (suspended)
			break;
	}

	/*
	 * If we were suspended in the middle of processing,
	 * requeue any unfinished sios and exit.
	 */
	while ((sio = list_remove_head(&sio_list)) != NULL)
		scan_io_queue_insert_impl(queue, sio);

	queue->q_zio = NULL;
	mutex_exit(q_lock);
	zio_nowait(zio);
	list_destroy(&sio_list);
}

/*
 * Performs an emptying run on all scan queues in the pool. This just
 * punches out one thread per top-level vdev, each of which processes
 * only that vdev's scan queue. We can parallelize the I/O here because
 * we know that each queue's I/Os only affect its own top-level vdev.
 *
 * This function waits for the queue runs to complete, and must be
 * called from dsl_scan_sync (or in general, syncing context).
 */
static void
scan_io_queues_run(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(scn->scn_is_sorted);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (scn->scn_queues_pending == 0)
		return;

	if (scn->scn_taskq == NULL) {
		int nthreads = spa->spa_root_vdev->vdev_children;

		/*
		 * We need to make this taskq *always* execute as many
		 * threads in parallel as we have top-level vdevs and no
		 * less, otherwise strange serialization of the calls to
		 * scan_io_queues_run_one can occur during spa_sync runs
		 * and that significantly impacts performance.
		 */
		scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
		    minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];

		mutex_enter(&vd->vdev_scan_io_queue_lock);
		if (vd->vdev_scan_io_queue != NULL) {
			VERIFY(taskq_dispatch(scn->scn_taskq,
			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
			    TQ_SLEEP) != TASKQID_INVALID);
		}
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * Wait for the queues to finish issuing their IOs for this run
	 * before we return. There may still be IOs in flight at this
	 * point.
	 */
	taskq_wait(scn->scn_taskq);
}

static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (zfs_async_block_max_blocks != 0 &&
	    scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
		return (B_TRUE);
	}

	if (zfs_max_async_dedup_frees != 0 &&
	    scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
		return (B_TRUE);
	}

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_async_block_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	if (BP_GET_DEDUP(bp))
		scn->scn_dedup_frees_this_txg++;
	return (0);
}

static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t i;
	uint64_t seg_size_total = 0, zio_size_total = 0;
	uint64_t seg_count_total = 0, zio_count_total = 0;

	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;

		if (queue == NULL)
			continue;

		seg_size_total += queue->q_total_seg_size_this_txg;
		zio_size_total += queue->q_total_zio_size_this_txg;
		seg_count_total += queue->q_segs_this_txg;
		zio_count_total += queue->q_zios_this_txg;
	}

	if (seg_count_total == 0 || zio_count_total == 0) {
		scn->scn_avg_seg_size_this_txg = 0;
		scn->scn_avg_zio_size_this_txg = 0;
		scn->scn_segs_this_txg = 0;
		scn->scn_zios_this_txg = 0;
		return;
	}

	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
	scn->scn_segs_this_txg = seg_count_total;
	scn->scn_zios_this_txg = zio_count_total;
}
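
/*
 * Worked example for the aggregation above: two queues reporting
 * (seg bytes, segs, zio bytes, zios) of (96 MiB, 3, 64 MiB, 1024) and
 * (32 MiB, 1, 32 MiB, 512) yield scn_avg_seg_size_this_txg ==
 * 128 MiB / 4 == 32 MiB and scn_avg_zio_size_this_txg ==
 * 96 MiB / 1536 == 64 KiB.
 */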

static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	return (dsl_scan_free_block_cb(arg, bp, tx));
}

static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	dsl_scan_t *scn = arg;
	const dva_t *dva = &bp->blk_dva[0];

	if (dsl_scan_async_block_should_pause(scn))
		return (SET_ERROR(ERESTART));

	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
	    DVA_GET_ASIZE(dva), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;
	boolean_t clones_left;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	clones_left = spa_livelist_delete_check(spa);
	return ((used != 0) || (clones_left));
}

boolean_t
dsl_errorscrub_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (dsl_errorscrubbing(scn->scn_dp))
		return (B_TRUE);
	return (B_FALSE);
}

static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
	boolean_t need_resilver = B_FALSE;

	for (int c = 0; c < vd->vdev_children; c++) {
		need_resilver |=
		    dsl_scan_check_deferred(vd->vdev_child[c]);
	}

	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
	    !vd->vdev_ops->vdev_op_leaf)
		return (need_resilver);

	if (!vd->vdev_resilver_deferred)
		need_resilver = B_TRUE;

	return (need_resilver);
}

static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	vdev_t *vd;

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops == &vdev_indirect_ops) {
		/*
		 * The indirect vdev can point to multiple
		 * vdevs. For simplicity, always create
		 * the resilver zio_t. zio_vdev_io_start()
		 * will bypass the child resilver i/o's if
		 * they are on vdevs that don't have DTL's.
		 */
		return (B_TRUE);
	}

	if (DVA_GET_GANG(dva)) {
		/*
		 * Gang members may be spread across multiple
		 * vdevs, so the best estimate we have is the
		 * scrub range, which has already been checked.
		 * XXX -- it would be better to change our
		 * allocation policy to ensure that all
		 * gang members reside on the same vdev.
		 */
		return (B_TRUE);
	}

	/*
	 * Check if the top-level vdev must resilver this offset.
	 * When the offset does not intersect with a dirty leaf DTL
	 * then it may be possible to skip the resilver IO. The psize
	 * is provided instead of asize to simplify the check for RAIDZ.
	 */
	if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
		return (B_FALSE);

	/*
	 * Check that this top-level vdev has a device under it which
	 * is resilvering and is not deferred.
	 */
	if (!dsl_scan_check_deferred(vd))
		return (B_FALSE);

	return (B_TRUE);
}

static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	if (spa_suspend_async_destroy(spa))
		return (0);

	if (zfs_free_bpobj_enabled &&
	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    bpobj_dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));
		scn->scn_zio_root = NULL;

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf. Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree on %s in txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    spa->spa_name, (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;
		scn->scn_dedup_frees_this_txg = 0;

		/*
		 * Write out changes to the DDT and the BRT that may be
		 * required as a result of the blocks freed. This ensures
		 * that the DDT and the BRT are clean when a scrub/resilver
		 * runs.
		 */
		ddt_sync(spa, tx->tx_txg);
		brt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return (err);
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    !spa_livelist_delete_check(spa)) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	spa_notify_waiters(spa);

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}
	return (0);
}

static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}
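
/*
 * Example inputs for the parsers above, which decode error-log ZAP key
 * strings of the form "objset:object:level:blkid" (and a bare object
 * number, respectively):
 *
 *	name_to_bookmark("54:3:0:127", &zb)
 *		-> zb_objset == 54, zb_object == 3,
 *		   zb_level == 0, zb_blkid == 127
 *	name_to_object("54", &obj)
 *		-> obj == 54
 */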

static void
read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;
	if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
		return;

	if (dmu_objset_from_ds(ds, &os) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	/*
	 * If the key is not loaded dbuf_dnode_findbp() will error out with
	 * EACCES. However in that case dnode_hold() will eventually call
	 * dbuf_read()->zio_wait() which may call spa_log_error(). This will
	 * lead to a deadlock due to us holding the mutex spa_errlist_lock.
	 * Avoid this by checking here if the keys are loaded, if not return.
	 * If the keys are not loaded the head_errlog feature is meaningless
	 * as we cannot figure out the birth txg of the block pointer.
	 */
	if (dsl_dataset_get_keystatus(ds->ds_dir) ==
	    ZFS_KEYSTATUS_UNAVAILABLE) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	dnode_t *dn;
	blkptr_t bp;

	if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
	    NULL);

	if (error) {
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	if (!error && BP_IS_HOLE(&bp)) {
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele(ds, FTAG);
		return;
	}

	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;

	/* If it's an intent log block, failure is expected. */
	if (zb.zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	ASSERT(!BP_IS_EMBEDDED(&bp));
	scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	dsl_dataset_rele(ds, FTAG);
}
3962 | ||
3963 | /* | |
3964 | * We keep track of the scrubbed error blocks in "count". This will be used | |
3965 | * when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This | |
3966 | * function is modelled after check_filesystem(). | |
3967 | */ | |
3968 | static int | |
3969 | scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep, | |
3970 | int *count) | |
3971 | { | |
3972 | dsl_dataset_t *ds; | |
3973 | dsl_pool_t *dp = spa->spa_dsl_pool; | |
3974 | dsl_scan_t *scn = dp->dp_scan; | |
3975 | ||
3976 | int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds); | |
3977 | if (error != 0) | |
3978 | return (error); | |
3979 | ||
3980 | uint64_t latest_txg; | |
3981 | uint64_t txg_to_consider = spa->spa_syncing_txg; | |
3982 | boolean_t check_snapshot = B_TRUE; | |
3983 | ||
3984 | error = find_birth_txg(ds, zep, &latest_txg); | |
3985 | ||
3986 | /* | |
3987 | * If find_birth_txg() errors out, then err on the side of caution and | |
3988 | * proceed. In worst case scenario scrub all objects. If zep->zb_birth | |
3989 | * is 0 (e.g. in case of encryption with unloaded keys) also proceed to | |
3990 | * scrub all objects. | |
3991 | */ | |
3992 | if (error == 0 && zep->zb_birth == latest_txg) { | |
3993 | /* Block neither free nor re written. */ | |
3994 | zbookmark_phys_t zb; | |
3995 | zep_to_zb(fs, zep, &zb); | |
3996 | scn->scn_zio_root = zio_root(spa, NULL, NULL, | |
3997 | ZIO_FLAG_CANFAIL); | |
3998 | /* We have already acquired the config lock for spa */ | |
3999 | read_by_block_level(scn, zb); | |
4000 | ||
4001 | (void) zio_wait(scn->scn_zio_root); | |
4002 | scn->scn_zio_root = NULL; | |
4003 | ||
4004 | scn->errorscrub_phys.dep_examined++; | |
4005 | scn->errorscrub_phys.dep_to_examine--; | |
4006 | (*count)++; | |
4007 | if ((*count) == zfs_scrub_error_blocks_per_txg || | |
4008 | dsl_error_scrub_check_suspend(scn, &zb)) { | |
4009 | dsl_dataset_rele(ds, FTAG); | |
4010 | return (SET_ERROR(EFAULT)); | |
4011 | } | |
4012 | ||
4013 | check_snapshot = B_FALSE; | |
4014 | } else if (error == 0) { | |
4015 | txg_to_consider = latest_txg; | |
4016 | } | |

	/*
	 * Retrieve the number of snapshots if the dataset is not a snapshot.
	 */
	uint64_t snap_count = 0;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {

		error = zap_count(spa->spa_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);

		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
	}

	if (snap_count == 0) {
		/* Filesystem without snapshots. */
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	dsl_dataset_rele(ds, FTAG);

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
		if (error != 0)
			return (error);

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
			snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
			snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			dsl_dataset_rele(ds, FTAG);
			continue;
		}

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);

			/*
			 * Scrub the snapshot also when zb_birth == 0 or when
			 * find_birth_txg() returns an error.
			 */
			affected = (error == 0 && zep->zb_birth == blk_txg) ||
			    (error != 0) || (zep->zb_birth == 0);
		}

		/* Scrub snapshots. */
		if (affected) {
			zbookmark_phys_t zb;
			zep_to_zb(snap_obj, zep, &zb);
			scn->scn_zio_root = zio_root(spa, NULL, NULL,
			    ZIO_FLAG_CANFAIL);
			/* We have already acquired the config lock for spa */
			read_by_block_level(scn, zb);

			(void) zio_wait(scn->scn_zio_root);
			scn->scn_zio_root = NULL;

			scn->errorscrub_phys.dep_examined++;
			scn->errorscrub_phys.dep_to_examine--;
			(*count)++;
			if ((*count) == zfs_scrub_error_blocks_per_txg ||
			    dsl_error_scrub_check_suspend(scn, &zb)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EFAULT));
			}
		}
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	return (0);
}

void
dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Only process scans in sync pass 1.
	 */

	if (spa_sync_pass(spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
		return;
	}

	if (dsl_scan_resilvering(scn->scn_dp)) {
		/* cancel the error scrub if resilver started */
		dsl_scan_cancel(scn->scn_dp);
		return;
	}

	spa->spa_scrub_active = B_TRUE;
	scn->scn_sync_start_time = gethrtime();

	/*
	 * zfs_scan_suspend_progress can be set to disable scrub progress.
	 * See more detailed comment in dsl_scan_sync().
	 */
	if (zfs_scan_suspend_progress) {
		uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		int mintime = zfs_scrub_min_time_ms;

		while (zfs_scan_suspend_progress &&
		    !txg_sync_waiting(scn->scn_dp) &&
		    !spa_shutting_down(scn->scn_dp->dp_spa) &&
		    NSEC2MSEC(scan_time_ns) < mintime) {
			delay(hz);
			scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		}
		return;
	}

	int i = 0;
	zap_attribute_t *za;
	zbookmark_phys_t *zb;
	boolean_t limit_exceeded = B_FALSE;

	za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
	zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);

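	/*
	 * Without the head_errlog feature, each error log entry encodes a
	 * full zbookmark_phys_t in its ZAP name, so the affected block can
	 * be scrubbed directly from the bookmark.
	 */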
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
		    zap_cursor_advance(&scn->errorscrub_cursor)) {
			name_to_bookmark(za->za_name, zb);

			scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
			    NULL, ZIO_FLAG_CANFAIL);
			dsl_pool_config_enter(dp, FTAG);
			read_by_block_level(scn, *zb);
			dsl_pool_config_exit(dp, FTAG);

			(void) zio_wait(scn->scn_zio_root);
			scn->scn_zio_root = NULL;

			scn->errorscrub_phys.dep_examined += 1;
			scn->errorscrub_phys.dep_to_examine -= 1;
			i++;
			if (i == zfs_scrub_error_blocks_per_txg ||
			    dsl_error_scrub_check_suspend(scn, zb)) {
				limit_exceeded = B_TRUE;
				break;
			}
		}

		if (!limit_exceeded)
			dsl_errorscrub_done(scn, B_TRUE, tx);

		dsl_errorscrub_sync_state(scn, tx);
		kmem_free(za, sizeof (*za));
		kmem_free(zb, sizeof (*zb));
		return;
	}

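	/*
	 * With the head_errlog feature, entries are keyed by head dataset
	 * and record each block's birth txg. For every record, find the
	 * top-level filesystem that can reference the block and scrub it
	 * there, along with any snapshots that may still share it.
	 */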
	int error = 0;
	for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
	    zap_cursor_advance(&scn->errorscrub_cursor)) {

		zap_cursor_t *head_ds_cursor;
		zap_attribute_t *head_ds_attr;
		zbookmark_err_phys_t head_ds_block;

		head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
		head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

		uint64_t head_ds_err_obj = za->za_first_integer;
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		boolean_t config_held = B_FALSE;
		uint64_t top_affected_fs;

		for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
		    head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {

			name_to_errphys(head_ds_attr->za_name, &head_ds_block);

			/*
			 * In case we are called from spa_sync the pool
			 * config is already held.
			 */
			if (!dsl_pool_config_held(dp)) {
				dsl_pool_config_enter(dp, FTAG);
				config_held = B_TRUE;
			}

			error = find_top_affected_fs(spa,
			    head_ds, &head_ds_block, &top_affected_fs);
			if (error)
				break;

			error = scrub_filesystem(spa, top_affected_fs,
			    &head_ds_block, &i);

			if (error == SET_ERROR(EFAULT)) {
				limit_exceeded = B_TRUE;
				break;
			}
		}

		zap_cursor_fini(head_ds_cursor);
		kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
		kmem_free(head_ds_attr, sizeof (*head_ds_attr));

		if (config_held)
			dsl_pool_config_exit(dp, FTAG);
	}

	kmem_free(za, sizeof (*za));
	kmem_free(zb, sizeof (*zb));
	if (!limit_exceeded)
		dsl_errorscrub_done(scn, B_TRUE, tx);

	dsl_errorscrub_sync_state(scn, tx);
}

/*
 * This is the primary entry point for scans that is called from syncing
 * context. Scans must happen entirely during syncing context so that we
 * can guarantee that blocks we are currently scanning will not change out
 * from under us. While a scan is active, this function controls how quickly
 * transaction groups proceed, instead of the normal handling provided by
 * txg_sync_thread().
 */
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	int err = 0;
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	state_sync_type_t sync_type = SYNC_OPTIONAL;

	if (spa->spa_resilver_deferred &&
	    !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
		spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init). We also restart scans if there
	 * is a deferred resilver and the user has manually disabled
	 * deferred resilvers via the tunable.
	 */
	if (dsl_scan_restarting(scn, tx) ||
	    (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
		    func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	/* reset scan statistics */
	scn->scn_visited_this_txg = 0;
	scn->scn_dedup_frees_this_txg = 0;
	scn->scn_holes_this_txg = 0;
	scn->scn_lt_min_this_txg = 0;
	scn->scn_gt_max_this_txg = 0;
	scn->scn_ddt_contained_this_txg = 0;
	scn->scn_objsets_visited_this_txg = 0;
	scn->scn_avg_seg_size_this_txg = 0;
	scn->scn_segs_this_txg = 0;
	scn->scn_avg_zio_size_this_txg = 0;
	scn->scn_zios_this_txg = 0;
	scn->scn_suspending = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys. If we suspend, don't do
	 * any scrubbing or resilvering. This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it. It is also faster to free the
	 * blocks than to scrub them.
	 */
	err = dsl_process_async_destroys(dp, tx);
	if (err != 0)
		return;

	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
		return;

	/*
	 * Wait a few txgs after importing to begin scanning so that
	 * we can get the pool imported quickly.
	 */
	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
		return;

	/*
	 * zfs_scan_suspend_progress can be set to disable scan progress.
	 * We don't want to spin the txg_sync thread, so we add a delay
	 * here to simulate the time spent doing a scan. This is mostly
	 * useful for testing and debugging.
	 */
	if (zfs_scan_suspend_progress) {
		uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		uint_t mintime = (scn->scn_phys.scn_func ==
		    POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
		    zfs_scrub_min_time_ms;

		while (zfs_scan_suspend_progress &&
		    !txg_sync_waiting(scn->scn_dp) &&
		    !spa_shutting_down(scn->scn_dp->dp_spa) &&
		    NSEC2MSEC(scan_time_ns) < mintime) {
			delay(hz);
			scan_time_ns = gethrtime() - scn->scn_sync_start_time;
		}
		return;
	}

	/*
	 * Disabled by default, set zfs_scan_report_txgs to report
	 * average performance over the last zfs_scan_report_txgs TXGs.
	 */
	if (zfs_scan_report_txgs != 0 &&
	    tx->tx_txg % zfs_scan_report_txgs == 0) {
		scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
		spa_scan_stat_init(spa);
	}

	/*
	 * It is possible to switch from unsorted to sorted at any time,
	 * but afterwards the scan will remain sorted unless reloaded from
	 * a checkpoint after a reboot.
	 */
	if (!zfs_scan_legacy) {
		scn->scn_is_sorted = B_TRUE;
		if (scn->scn_last_checkpoint == 0)
			scn->scn_last_checkpoint = ddi_get_lbolt();
	}

	/*
	 * For sorted scans, determine what kind of work we will be doing
	 * this txg based on our memory limitations and whether or not we
	 * need to perform a checkpoint.
	 */
	if (scn->scn_is_sorted) {
		/*
		 * If we are over our checkpoint interval, set scn_clearing
		 * so that we can begin checkpointing immediately. The
		 * checkpoint allows us to save a consistent bookmark
		 * representing how much data we have scrubbed so far.
		 * Otherwise, use the memory limit to determine if we should
		 * scan for metadata or start issuing scrub IOs. We accumulate
		 * metadata until we hit our hard memory limit at which point
		 * we issue scrub IOs until we are at our soft memory limit.
		 */
		if (scn->scn_checkpointing ||
		    ddi_get_lbolt() - scn->scn_last_checkpoint >
		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
			if (!scn->scn_checkpointing)
				zfs_dbgmsg("begin scan checkpoint for %s",
				    spa->spa_name);

			scn->scn_checkpointing = B_TRUE;
			scn->scn_clearing = B_TRUE;
		} else {
			boolean_t should_clear = dsl_scan_should_clear(scn);
			if (should_clear && !scn->scn_clearing) {
				zfs_dbgmsg("begin scan clearing for %s",
				    spa->spa_name);
				scn->scn_clearing = B_TRUE;
			} else if (!should_clear && scn->scn_clearing) {
				zfs_dbgmsg("finish scan clearing for %s",
				    spa->spa_name);
				scn->scn_clearing = B_FALSE;
			}
		}
	} else {
		ASSERT0(scn->scn_checkpointing);
		ASSERT0(scn->scn_clearing);
	}

	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
		/* Need to scan metadata for more blocks to scrub */
		dsl_scan_phys_t *scnp = &scn->scn_phys;
		taskqid_t prefetch_tqid;

		/*
		 * Calculate the max number of in-flight bytes for pool-wide
		 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
		 * Limits for the issuing phase are done per top-level vdev and
		 * are handled separately.
		 */
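		/*
		 * Worked example (values illustrative, not defaults): with
		 * zfs_scan_vdev_limit = 4 MiB and 10 data disks, the cap is
		 * MIN(arc_c_max / 4, MAX(1 MiB, 40 MiB)).
		 */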
		scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
		    zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));

		if (scnp->scn_ddt_bookmark.ddb_class <=
		    scnp->scn_ddt_class_max) {
			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
			zfs_dbgmsg("doing scan sync for %s txg %llu; "
			    "ddt bm=%llu/%llu/%llu/%llx",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		} else {
			zfs_dbgmsg("doing scan sync for %s txg %llu; "
			    "bm=%llu/%llu/%llu/%llu",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg,
			    (longlong_t)scnp->scn_bookmark.zb_objset,
			    (longlong_t)scnp->scn_bookmark.zb_object,
			    (longlong_t)scnp->scn_bookmark.zb_level,
			    (longlong_t)scnp->scn_bookmark.zb_blkid);
		}

		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);

		scn->scn_prefetch_stop = B_FALSE;
		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
		ASSERT(prefetch_tqid != TASKQID_INVALID);

		dsl_pool_config_enter(dp, FTAG);
		dsl_scan_visit(scn, tx);
		dsl_pool_config_exit(dp, FTAG);

		mutex_enter(&dp->dp_spa->spa_scrub_lock);
		scn->scn_prefetch_stop = B_TRUE;
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&dp->dp_spa->spa_scrub_lock);

		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
		    "(%llu os's, %llu holes, %llu < mintxg, "
		    "%llu in ddt, %llu > maxtxg)",
		    (longlong_t)scn->scn_visited_this_txg,
		    spa->spa_name,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_objsets_visited_this_txg,
		    (longlong_t)scn->scn_holes_this_txg,
		    (longlong_t)scn->scn_lt_min_this_txg,
		    (longlong_t)scn->scn_ddt_contained_this_txg,
		    (longlong_t)scn->scn_gt_max_this_txg);

		if (!scn->scn_suspending) {
			ASSERT0(avl_numnodes(&scn->scn_queue));
			scn->scn_done_txg = tx->tx_txg + 1;
			if (scn->scn_is_sorted) {
				scn->scn_checkpointing = B_TRUE;
				scn->scn_clearing = B_TRUE;
				scn->scn_issued_before_pass +=
				    spa->spa_scan_pass_issued;
				spa_scan_stat_init(spa);
			}
			zfs_dbgmsg("scan complete for %s txg %llu",
			    spa->spa_name,
			    (longlong_t)tx->tx_txg);
		}
	} else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
		ASSERT(scn->scn_clearing);

		/* need to issue scrubbing IOs from per-vdev queues */
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_CANFAIL);
		scan_io_queues_run(scn);
		(void) zio_wait(scn->scn_zio_root);
		scn->scn_zio_root = NULL;

		/* calculate and dprintf the current memory usage */
		(void) dsl_scan_should_clear(scn);
		dsl_scan_update_stats(scn);

		zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
		    "in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
		    (longlong_t)scn->scn_zios_this_txg,
		    spa->spa_name,
		    (longlong_t)scn->scn_segs_this_txg,
		    (longlong_t)NSEC2MSEC(gethrtime() -
		    scn->scn_sync_start_time),
		    (longlong_t)scn->scn_avg_zio_size_this_txg,
		    (longlong_t)scn->scn_avg_seg_size_this_txg);
	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
		/* Finished with everything. Mark the scrub as complete */
		zfs_dbgmsg("scan issuing complete txg %llu for %s",
		    (longlong_t)tx->tx_txg,
		    spa->spa_name);
		ASSERT3U(scn->scn_done_txg, !=, 0);
		ASSERT0(spa->spa_scrub_inflight);
		ASSERT0(scn->scn_queues_pending);
		dsl_scan_done(scn, B_TRUE, tx);
		sync_type = SYNC_MANDATORY;
	}

	dsl_scan_sync_state(scn, tx, sync_type);
}

static void
count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
	/*
	 * Don't count embedded bp's, since we already did the work of
	 * scanning these when we scanned the containing block.
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Update the spa's stats on how many bytes we have issued.
	 * Sequential scrubs create a zio for each DVA of the bp. Each
	 * of these will include all DVAs for repair purposes, but the
	 * zio code will only try the first one unless there is an issue.
	 * Therefore, we should only count the first DVA for these IOs.
	 */
	atomic_add_64(&spa->spa_scan_pass_issued,
	    all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}

static void
count_block_skipped(dsl_scan_t *scn, const blkptr_t *bp, boolean_t all)
{
	if (BP_IS_EMBEDDED(bp))
		return;
	atomic_add_64(&scn->scn_phys.scn_skipped,
	    all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (int i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;

		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

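		/*
		 * Count ditto copies whose DVAs ended up on the same
		 * top-level vdev; such copies provide less redundancy
		 * than copies spread across different vdevs.
		 */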
		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
	avl_index_t idx;
	dsl_scan_t *scn = queue->q_scn;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

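	/*
	 * Adding the first sio to an empty queue makes that queue runnable;
	 * scn_queues_pending tracks how many per-vdev queues currently have
	 * work for the issuing phase.
	 */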
	if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
		atomic_add_64(&scn->scn_queues_pending, 1);
	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
		/* block is already scheduled for reading */
		sio_free(sio);
		return;
	}
	avl_insert(&queue->q_sios_by_addr, sio, idx);
	queue->q_sio_memused += SIO_GET_MUSED(sio);
	range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
	    SIO_GET_ASIZE(sio));
}

/*
 * Given all the info we got from our metadata scanning process, we
 * construct a scan_io_t and insert it into the scan sorting queue. The
 * I/O must already be suitable for us to process. This is controlled
 * by dsl_scan_enqueue().
 */
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
    int zio_flags, const zbookmark_phys_t *zb)
{
	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));

	ASSERT0(BP_IS_GANG(bp));
	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	bp2sio(bp, sio, dva_i);
	sio->sio_flags = zio_flags;
	sio->sio_zb = *zb;

	queue->q_last_ext_addr = -1;
	scan_io_queue_insert_impl(queue, sio);
}

/*
 * Given a set of I/O parameters as discovered by the metadata traversal
 * process, attempts to place the I/O into the sorted queues (if allowed),
 * or immediately executes the I/O.
 */
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb)
{
	spa_t *spa = dp->dp_spa;

	ASSERT(!BP_IS_EMBEDDED(bp));

	/*
	 * Gang blocks are hard to issue sequentially, so we just issue them
	 * here immediately instead of queuing them.
	 */
	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
		scan_exec_io(dp, bp, zio_flags, zb, NULL);
		return;
	}

	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		dva_t dva;
		vdev_t *vdev;

		dva = bp->blk_dva[i];
		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
		ASSERT(vdev != NULL);

		mutex_enter(&vdev->vdev_scan_io_queue_lock);
		if (vdev->vdev_scan_io_queue == NULL)
			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
		ASSERT(dp->dp_scan != NULL);
		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
		    i, zio_flags, zb);
		mutex_exit(&vdev->vdev_scan_io_queue_lock);
	}
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	size_t psize = BP_GET_PSIZE(bp);
	boolean_t needs_io = B_FALSE;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;

	count_block(dp->dp_blkstats, bp);
	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg) {
		count_block_skipped(scn, bp, B_TRUE);
		return (0);
	}

	/* Embedded BP's have phys_birth==0, so we reject them above. */
	ASSERT(!BP_IS_EMBEDDED(bp));

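	/*
	 * A scrub reads and verifies every allocated copy, while a resilver
	 * only issues I/O for DVAs that the per-DVA check below determines
	 * were missed (e.g. they fall within a vdev's DTL).
	 */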
	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(8) status can make useful progress reports.
		 */
		uint64_t asize = DVA_GET_ASIZE(dva);
		scn->scn_phys.scn_examined += asize;
		spa->spa_scan_pass_exam += asize;

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io)
			needs_io = dsl_scan_need_resilver(spa, dva, psize,
			    phys_birth);
	}

	if (needs_io && !zfs_no_scrub_io) {
		dsl_scan_enqueue(dp, bp, zio_flags, zb);
	} else {
		count_block_skipped(scn, bp, B_TRUE);
	}

	/* do not relocate this block */
	return (0);
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	dsl_scan_io_queue_t *queue = zio->io_private;

	abd_free(zio->io_abd);

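	/*
	 * Release the in-flight byte accounting taken in scan_exec_io()
	 * (pool-wide or per-queue) and wake up any issuer throttled on it.
	 */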
	if (queue == NULL) {
		mutex_enter(&spa->spa_scrub_lock);
		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
		cv_broadcast(&spa->spa_scrub_io_cv);
		mutex_exit(&spa->spa_scrub_lock);
	} else {
		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
		cv_broadcast(&queue->q_zio_cv);
		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
	}

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
		    !dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
			atomic_inc_64(&spa->spa_dsl_pool->dp_scan
			    ->errorscrub_phys.dep_errors);
		} else {
			atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
			    .scn_errors);
		}
	}
}

/*
 * Given a scanning zio's information, executes the zio. The zio need not
 * be sortable; this function simply executes it, whatever it is. The
 * optional queue argument allows the caller to specify per top-level vdev
 * I/O rate limiting instead of the legacy global limiting.
 */
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	abd_t *data = abd_alloc_for_io(size, B_FALSE);
	zio_t *pio;

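	/*
	 * Throttle: block until in-flight scan bytes drop below the limit,
	 * charging this I/O against either the legacy pool-wide limit or
	 * the per-queue limit used by sorted scans.
	 */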
	if (queue == NULL) {
		ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
		mutex_exit(&spa->spa_scrub_lock);
		pio = scn->scn_zio_root;
	} else {
		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;

		ASSERT3U(queue->q_maxinflight_bytes, >, 0);
		mutex_enter(q_lock);
		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
			cv_wait(&queue->q_zio_cv, q_lock);
		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
		pio = queue->q_zio;
		mutex_exit(q_lock);
	}

	ASSERT(pio != NULL);
	count_block_issued(spa, bp, queue == NULL);
	zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
	    queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}

/*
 * This is the primary extent sorting algorithm. We balance two parameters:
 * 1) how many bytes of I/O are in an extent
 * 2) how well the extent is filled with I/O (as a fraction of its total size)
 * Since we allow extents to have gaps between their constituent I/Os, it's
 * possible to have a fairly large extent that contains the same amount of
 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
 * The algorithm sorts based on a score calculated from the extent's size,
 * the relative fill volume (in %) and a "fill weight" parameter that controls
 * the split between whether we prefer larger extents or more well populated
 * extents:
 *
 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
 *
 * Example:
 * 1) assume extsz = 64 MiB
 * 2) assume fill = 32 MiB (extent is half full)
 * 3) assume fill_weight = 3
 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
 *	SCORE = 32M + (50 * 3 * 32M) / 100
 *	SCORE = 32M + (4800M / 100)
 *	SCORE = 32M + 48M
 *	         ^     ^
 *	         |     +--- final total relative fill-based score
 *	         +--------- final total fill-based score
 *	SCORE = 80M
 *
 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
 *
 * Since we do not care if one extent is only a few percent better than
 * another, we compress the score into 6 bits via binary logarithm AKA
 * highbit64() and put it into the high bits of the offset that are otherwise
 * unused due to ashift. This allows us to reduce q_exts_by_size B-tree
 * elements to only 64 bits and compare them with a single operation. Plus it
 * makes scrubs more sequential and reduces the chance that a minor extent
 * change moves it within the B-tree.
 */
__attribute__((always_inline)) inline
static int
ext_size_compare(const void *x, const void *y)
{
	const uint64_t *a = x, *b = y;

	return (TREE_CMP(*a, *b));
}

ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
    ext_size_compare)

static void
ext_size_create(range_tree_t *rt, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;

	zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf,
	    sizeof (uint64_t));
}

static void
ext_size_destroy(range_tree_t *rt, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;
	ASSERT0(zfs_btree_numnodes(size_tree));

	zfs_btree_destroy(size_tree);
}

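/*
 * Encode an extent's score and start offset into a single 64-bit B-tree
 * key: the inverted binary log of the score occupies the high bits and the
 * extent's start offset the low bits, so one integer compare orders extents
 * by score and, among equal scores, by LBA.
 */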
static uint64_t
ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
{
	(void) rt;
	uint64_t size = rsg->rs_end - rsg->rs_start;
	uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
	    fill_weight * rsg->rs_fill) >> 7);
	ASSERT3U(rt->rt_shift, >=, 8);
	return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
}

static void
ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	zfs_btree_t *size_tree = arg;
	ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
	uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
	zfs_btree_add(size_tree, &v);
}

static void
ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	zfs_btree_t *size_tree = arg;
	ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
	uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
	zfs_btree_remove(size_tree, &v);
}

static void
ext_size_vacate(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	ext_size_create(rt, arg);
}

static const range_tree_ops_t ext_size_ops = {
	.rtop_create = ext_size_create,
	.rtop_destroy = ext_size_destroy,
	.rtop_add = ext_size_add,
	.rtop_remove = ext_size_remove,
	.rtop_vacate = ext_size_vacate
};

/*
 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
 * based on LBA-order (from lowest to highest).
 */
static int
sio_addr_compare(const void *x, const void *y)
{
	const scan_io_t *a = x, *b = y;

	return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}

/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);

	q->q_scn = scn;
	q->q_vd = vd;
	q->q_sio_memused = 0;
	q->q_last_ext_addr = -1;
	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
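	/*
	 * The queue tracks pending work in two ways: q_exts_by_addr, a
	 * gap-tolerant range tree of extents (mirrored into the
	 * q_exts_by_size B-tree by ext_size_ops for score-based selection),
	 * and q_sios_by_addr, an AVL tree of individual scan_io_t's in
	 * LBA order.
	 */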
	q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
	    &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
	avl_create(&q->q_sios_by_addr, sio_addr_compare,
	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));

	return (q);
}

/*
 * Destroys a scan queue and all segments and scan_io_t's contained in it.
 * No further execution of I/O occurs, anything pending in the queue is
 * simply freed without being executed.
 */
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
	dsl_scan_t *scn = queue->q_scn;
	scan_io_t *sio;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));

	if (!avl_is_empty(&queue->q_sios_by_addr))
		atomic_add_64(&scn->scn_queues_pending, -1);
	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
	    NULL) {
		ASSERT(range_tree_contains(queue->q_exts_by_addr,
		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
		queue->q_sio_memused -= SIO_GET_MUSED(sio);
		sio_free(sio);
	}

	ASSERT0(queue->q_sio_memused);
	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
	range_tree_destroy(queue->q_exts_by_addr);
	avl_destroy(&queue->q_sios_by_addr);
	cv_destroy(&queue->q_zio_cv);

	kmem_free(queue, sizeof (*queue));
}

/*
 * Properly transfers a dsl_scan_queue_t from `svd' to `tvd'. This is
 * called on behalf of vdev_top_transfer when creating or destroying
 * a mirror vdev due to zpool attach/detach.
 */
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
	mutex_enter(&svd->vdev_scan_io_queue_lock);
	mutex_enter(&tvd->vdev_scan_io_queue_lock);

	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
	svd->vdev_scan_io_queue = NULL;
	if (tvd->vdev_scan_io_queue != NULL)
		tvd->vdev_scan_io_queue->q_vd = tvd;

	mutex_exit(&tvd->vdev_scan_io_queue_lock);
	mutex_exit(&svd->vdev_scan_io_queue_lock);
}

static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		if (tvd->vdev_scan_io_queue != NULL)
			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
		tvd->vdev_scan_io_queue = NULL;
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}
}

static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;
	vdev_t *vdev;
	kmutex_t *q_lock;
	dsl_scan_io_queue_t *queue;
	scan_io_t *srch_sio, *sio;
	avl_index_t idx;
	uint64_t start, size;

	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
	ASSERT(vdev != NULL);
	q_lock = &vdev->vdev_scan_io_queue_lock;
	queue = vdev->vdev_scan_io_queue;

	mutex_enter(q_lock);
	if (queue == NULL) {
		mutex_exit(q_lock);
		return;
	}

	srch_sio = sio_alloc(BP_GET_NDVAS(bp));
	bp2sio(bp, srch_sio, dva_i);
	start = SIO_GET_OFFSET(srch_sio);
	size = SIO_GET_ASIZE(srch_sio);

	/*
	 * We can find the zio in two states:
	 * 1) Cold, just sitting in the queue of zio's to be issued at
	 *	some point in the future. In this case, all we do is
	 *	remove the zio from the q_sios_by_addr tree, decrement
	 *	its data volume from the containing range_seg_t and
	 *	resort the q_exts_by_size tree to reflect that the
	 *	range_seg_t has lost some of its 'fill'. We don't shorten
	 *	the range_seg_t - this is usually rare enough not to be
	 *	worth the extra hassle of trying to keep track of precise
	 *	extent boundaries.
	 * 2) Hot, where the zio is currently in-flight in
	 *	dsl_scan_issue_ios. In this case, we can't simply
	 *	reach in and stop the in-flight zio's, so we instead
	 *	block the caller. Eventually, dsl_scan_issue_ios will
	 *	be done with issuing the zio's it gathered and will
	 *	signal us.
	 */
	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
	sio_free(srch_sio);

	if (sio != NULL) {
		blkptr_t tmpbp;

		/* Got it while it was cold in the queue */
		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
		ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
		avl_remove(&queue->q_sios_by_addr, sio);
		if (avl_is_empty(&queue->q_sios_by_addr))
			atomic_add_64(&scn->scn_queues_pending, -1);
		queue->q_sio_memused -= SIO_GET_MUSED(sio);

		ASSERT(range_tree_contains(queue->q_exts_by_addr, start,
		    size));
		range_tree_remove_fill(queue->q_exts_by_addr, start, size);

		/* count the block as though we skipped it */
		sio2bp(sio, &tmpbp);
		count_block_skipped(scn, &tmpbp, B_FALSE);

		sio_free(sio);
	}
	mutex_exit(q_lock);
}

/*
 * Callback invoked when a zio_free() zio is executing. This needs to be
 * intercepted to prevent the zio from deallocating a particular portion
 * of disk space and it then getting reallocated and written to, while we
 * still have it queued up for processing.
 */
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	dsl_scan_t *scn = dp->dp_scan;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(scn != NULL);
	if (!dsl_scan_is_running(scn))
		return;

	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
		dsl_scan_freed_dva(spa, bp, i);
}

/*
 * Check if a vdev needs resilvering (non-empty DTL); if so and a resilver
 * has not yet started, start it. Otherwise, only restart if the max txg in
 * the DTL range is greater than the max txg in the current scan. If the DTL
 * max is less than the scan max, then the vdev has not missed any new data
 * since the resilver started, so a restart is not needed.
 */
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
	uint64_t min, max;

	if (!vdev_resilver_needed(vd, &min, &max))
		return;

	if (!dsl_scan_resilvering(dp)) {
		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
		return;
	}

	if (max <= dp->dp_scan->scn_phys.scn_max_txg)
		return;

	/* restart is needed, check if it can be deferred */
	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
		vdev_defer_resilver(vd);
	else
		spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}

ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
	"Max bytes in flight per leaf vdev for scrubs and resilvers");

ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to scrub per txg");

ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to obsolete per txg");

ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to free per txg");

ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
	"Min millisecs to resilver per txg");

ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
	"Set to prevent scans from progressing");

ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
	"Set to disable scrub I/O");

ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
	"Set to disable scrub prefetching");

ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
	"Max number of blocks freed in one txg");

ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
	"Max number of dedup blocks freed in one txg");

ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
	"Enable processing of the free_bpobj");

ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
	"Enable block statistics calculation during scrub");

ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
	"Fraction of RAM for scan hard limit");

ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
	"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");

ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
	"Scrub using legacy non-sequential method");

ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
	"Scan progress on-disk checkpointing interval");

ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
	"Max gap in bytes between sequential scrub / resilver I/Os");

ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
	"Fraction of hard limit used as soft limit");

ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
	"Tunable to attempt to reduce lock contention");

ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
	"Tunable to adjust bias towards more filled segments during scans");

ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
	"Tunable to report resilver performance over the last N txgs");

ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
	"Process all resilvers immediately");

ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
	"Error blocks to be scrubbed in one txg");
/* END CSTYLED */