/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/refcount.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>

/*
 * An indirect vdev corresponds to a vdev that has been removed. Since
 * we cannot rewrite block pointers of snapshots, etc., we keep a
 * mapping from old location on the removed device to the new location
 * on another device in the pool and use this mapping whenever we need
 * to access the DVA. Unfortunately, this mapping did not respect
 * logical block boundaries when it was first created, and so a DVA on
 * this indirect vdev may be "split" into multiple sections that each
 * map to a different location. As a consequence, not all DVAs can be
 * translated to an equivalent new DVA. Instead we must provide a
 * "vdev_remap" operation that executes a callback on each contiguous
 * segment of the new location. This function is used in multiple ways:
 *
 *  - i/os to this vdev use the callback to determine where the
 *    data is now located, and issue child i/os for each segment's new
 *    location.
 *
 *  - frees and claims to this vdev use the callback to free or claim
 *    each mapped segment. (Note that we don't actually need to claim
 *    log blocks on indirect vdevs, because we don't allocate to
 *    removing vdevs. However, zdb uses zio_claim() for its leak
 *    detection.)
 */
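
/*
 * Illustrative example (added note, values hypothetical): a 128K DVA on a
 * removed vdev might be mapped as two segments, say 96K now on vdev 3
 * followed by 32K on vdev 5. A vdev_indirect_remap() of that DVA invokes
 * the callback once per contiguous segment, i.e. twice here.
 */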

/*
 * "Big theory statement" for how we mark blocks obsolete.
 *
 * When a block on an indirect vdev is freed or remapped, a section of
 * that vdev's mapping may no longer be referenced (aka "obsolete"). We
 * keep track of how much of each mapping entry is obsolete. When
 * an entry becomes completely obsolete, we can remove it, thus reducing
 * the memory used by the mapping. The complete picture of obsolescence
 * is given by the following data structures, described below:
 *  - the entry-specific obsolete count
 *  - the vdev-specific obsolete spacemap
 *  - the pool-specific obsolete bpobj
 *
 * == On disk data structures used ==
 *
 * We track the obsolete space for the pool using several objects. Each
 * of these objects is created on demand and freed when no longer
 * needed, and is assumed to be empty if it does not exist.
 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
 *
 *  - Each vic_mapping_object (associated with an indirect vdev) can
 *    have a vimp_counts_object. This is an array of uint32_t's
 *    with the same number of entries as the vic_mapping_object. When
 *    the mapping is condensed, entries from the vic_obsolete_sm_object
 *    (see below) are folded into the counts. Therefore, each
 *    obsolete_counts entry tells us the number of bytes in the
 *    corresponding mapping entry that were not referenced when the
 *    mapping was last condensed.
 *
 *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
 *    This is a space map containing an alloc entry for every DVA that
 *    has been obsoleted since the last time this indirect vdev was
 *    condensed. We use this object in order to improve performance
 *    when marking a DVA as obsolete. Instead of modifying an arbitrary
 *    offset of the vimp_counts_object, we only need to append an entry
 *    to the end of this object. When a DVA becomes obsolete, it is
 *    added to the obsolete space map. This happens when the DVA is
 *    freed, remapped and not referenced by a snapshot, or the last
 *    snapshot referencing it is destroyed.
 *
 *  - Each dataset can have a ds_remap_deadlist object. This is a
 *    deadlist object containing all blocks that were remapped in this
 *    dataset but referenced in a previous snapshot. Blocks can *only*
 *    appear on this list if they were remapped (dsl_dataset_block_remapped);
 *    blocks that were killed in a head dataset are put on the normal
 *    ds_deadlist and marked obsolete when they are freed.
 *
 *  - The pool can have a dp_obsolete_bpobj. This is a list of blocks
 *    in the pool that need to be marked obsolete. When a snapshot is
 *    destroyed, we move some of the ds_remap_deadlist to the obsolete
 *    bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
 *    asynchronously process the obsolete bpobj, moving its entries to
 *    the specific vdevs' obsolete space maps.
 *
 * == Summary of how we mark blocks as obsolete ==
 *
 *  - When freeing a block: if any DVA is on an indirect vdev, append to
 *    vic_obsolete_sm_object.
 *  - When remapping a block, add dva to ds_remap_deadlist (if prev snap
 *    references; otherwise append to vic_obsolete_sm_object).
 *  - When freeing a snapshot: move parts of ds_remap_deadlist to
 *    dp_obsolete_bpobj (same algorithm as ds_deadlist).
 *  - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
 *    individual vdev's vic_obsolete_sm_object.
 */
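
/*
 * Hedged sketch (added, not upstream code) of the first rule above: when
 * a block is freed and one of its DVAs lives on an indirect (removed)
 * vdev, we only need to record the range in that vdev's obsolete
 * tracking, which this file implements in vdev_indirect_mark_obsolete().
 * The helper name is hypothetical and error handling is omitted.
 */
#if 0
static void
example_mark_freed_dva_obsolete(spa_t *spa, const dva_t *dva)
{
	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	/* Only indirect (removed) vdevs keep an obsolete record. */
	if (vd->vdev_ops == &vdev_indirect_ops) {
		vdev_indirect_mark_obsolete(vd, DVA_GET_OFFSET(dva),
		    DVA_GET_ASIZE(dva));
	}
}
#endif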

/*
 * "Big theory statement" for how we condense indirect vdevs.
 *
 * Condensing an indirect vdev's mapping is the process of determining
 * the precise counts of obsolete space for each mapping entry (by
 * integrating the obsolete spacemap into the obsolete counts) and
 * writing out a new mapping that contains only referenced entries.
 *
 * We condense a vdev when we expect the mapping to shrink (see
 * vdev_indirect_should_condense()), but only perform one condense at a
 * time to limit the memory usage. In addition, we use a separate
 * open-context thread (spa_condense_indirect_thread) to incrementally
 * create the new mapping object in a way that minimizes the impact on
 * the rest of the system.
 *
 * == Generating a new mapping ==
 *
 * To generate a new mapping, we follow these steps:
 *
 * 1. Save the old obsolete space map and create a new mapping object
 *    (see spa_condense_indirect_start_sync()). This initializes the
 *    spa_condensing_indirect_phys with the "previous obsolete space map",
 *    which is now read only. Newly obsolete DVAs will be added to a
 *    new (initially empty) obsolete space map, and will not be
 *    considered as part of this condense operation.
 *
 * 2. Construct in memory the precise counts of obsolete space for each
 *    mapping entry, by incorporating the obsolete space map into the
 *    counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
 *
 * 3. Iterate through each mapping entry, writing to the new mapping any
 *    entries that are not completely obsolete (i.e. which don't have
 *    obsolete count == mapping length). (See
 *    spa_condense_indirect_generate_new_mapping().)
 *
 * 4. Destroy the old mapping object and switch over to the new one
 *    (spa_condense_indirect_complete_sync).
 *
 * == Restarting from failure ==
 *
 * To restart the condense when we import/open the pool, we must start
 * at the 2nd step above: reconstruct the precise counts in memory,
 * based on the space map + counts. Then in the 3rd step, we start
 * iterating where we left off: at vimp_max_offset of the new mapping
 * object.
 */
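
/*
 * Added outline (hedged): how the four steps above map onto the functions
 * in this file. The actual driver is the zthr created by
 * spa_start_indirect_condensing_thread():
 *
 *	vdev_indirect_should_condense(vd)	decide whether to condense
 *	spa_condense_indirect_start_sync()	step 1, from syncing context
 *	spa_condense_indirect_thread()		steps 2-3, open-context zthr
 *	spa_condense_indirect_complete_sync()	step 4, as a sync task
 */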

int zfs_condense_indirect_vdevs_enable = B_TRUE;

/*
 * Condense if at least this percent of the bytes in the mapping is
 * obsolete. With the default of 25%, the amount of space mapped
 * will be reduced to 1% of its original size after at most 16
 * condenses. Higher values will condense less often (causing less
 * i/o); lower values will reduce the mapping size more quickly.
 */
int zfs_indirect_condense_obsolete_pct = 25;
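
/*
 * Worked example of the claim above (added note): each condense removes at
 * least 25% of the remaining mapped bytes, so after n condenses at most
 * 0.75^n of the original mapping survives; 0.75^16 ~= 0.010, i.e. about 1%.
 */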

/*
 * Condense if the obsolete space map takes up more than this amount of
 * space on disk (logically). This limits the amount of disk space
 * consumed by the obsolete space map; the default of 1GB is small enough
 * that we typically don't mind "wasting" it.
 */
unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;

/*
 * Don't bother condensing if the mapping uses less than this amount of
 * memory. The default of 128KB is considered a "trivial" amount of
 * memory and not worth reducing.
 */
unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a condense (which might otherwise
 * complete too quickly). If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
 */
int zfs_condense_indirect_commit_entry_delay_ms = 0;

/*
 * If an indirect split block contains more than this many possible unique
 * combinations when being reconstructed, consider it too computationally
 * expensive to check them all. Instead, try at most 100 randomly-selected
 * combinations each time the block is accessed. This allows all segment
 * copies to participate fairly in the reconstruction when all combinations
 * cannot be checked and prevents repeated use of one bad copy.
 */
int zfs_reconstruct_indirect_combinations_max = 4096;
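
/*
 * Worked example (added note): the number of combinations is the product
 * of the number of unique copies in each split segment. A block split
 * into 4 segments, each on a 3-way mirror with all copies unique, has
 * 3^4 = 81 combinations; with 8 such segments it is 3^8 = 6561, which
 * already exceeds the default of 4096 above.
 */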

/*
 * Enable to simulate damaged segments and validate reconstruction. This
 * is intentionally not exposed as a module parameter.
 */
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	abd_t *ic_data;
	vdev_t *ic_vdev;

	/*
	 * ic_duplicate is NULL when the ic_data contents are unique; when
	 * it is determined to be a duplicate, it references the primary
	 * child.
	 */
	struct indirect_child *ic_duplicate;
	list_node_t ic_node; /* node on is_unique_child */
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */
	int is_unique_children; /* number of entries in is_unique_child */
	list_t is_unique_child;

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	indirect_child_t *is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;
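
/*
 * Illustrative example (added note, sizes hypothetical): a 48K i/o mapped
 * as a 16K segment followed by a 32K segment yields two indirect_split_t's
 * with (is_split_offset, is_size) of (0, 16K) and (16K, 32K); the split
 * offsets partition [0, io_size).
 */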

/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;
	uint64_t iv_unique_combinations;
	uint64_t iv_attempts;
	uint64_t iv_attempts_max;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;

static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	indirect_split_t *is;
	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			if (ic->ic_data != NULL)
				abd_free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);

		indirect_child_t *ic;
		while ((ic = list_head(&is->is_unique_child)) != NULL)
			list_remove(&is->is_unique_child, ic);

		list_destroy(&is->is_unique_child);

		kmem_free(is,
		    offsetof(indirect_split_t, is_child[is->is_children]));
	}
	kmem_free(iv, sizeof (*iv));
}

static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
	.vsd_free = vdev_indirect_map_free,
	.vsd_cksum_report = zio_vsd_default_cksum_report
};

/*
 * Mark the given offset and size as being obsolete.
 */
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(size > 0);
	VERIFY(vdev_indirect_mapping_entry_for_offset(
	    vd->vdev_indirect_mapping, offset) != NULL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		mutex_enter(&vd->vdev_obsolete_lock);
		range_tree_add(vd->vdev_obsolete_segments, offset, size);
		mutex_exit(&vd->vdev_obsolete_lock);
		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
	}
}

/*
 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
 * wrapper is provided because the DMU does not know about vdev_t's and
 * cannot directly call vdev_indirect_mark_obsolete.
 */
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	ASSERT(dmu_tx_is_syncing(tx));

	/* The DMU can only remap indirect vdevs. */
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	vdev_indirect_mark_obsolete(vd, offset, size);
}

static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
	objset_t *mos = spa->spa_meta_objset;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&sci->sci_new_mapping_entries[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	sci->sci_new_mapping =
	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);

	return (sci);
}

static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&sci->sci_new_mapping_entries[i]);

	if (sci->sci_new_mapping != NULL)
		vdev_indirect_mapping_close(sci->sci_new_mapping);

	kmem_free(sci, sizeof (*sci));
}

boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	spa_t *spa = vd->vdev_spa;

	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));

	if (!zfs_condense_indirect_vdevs_enable)
		return (B_FALSE);

	/*
	 * We can only condense one indirect vdev at a time.
	 */
	if (spa->spa_condensing_indirect != NULL)
		return (B_FALSE);

	if (spa_shutting_down(spa))
		return (B_FALSE);

	/*
	 * The mapping object size must not change while we are
	 * condensing, so we can only condense indirect vdevs
	 * (not vdevs that are still in the middle of being removed).
	 */
	if (vd->vdev_ops != &vdev_indirect_ops)
		return (B_FALSE);

	/*
	 * If nothing new has been marked obsolete, there is no
	 * point in condensing.
	 */
	ASSERTV(uint64_t obsolete_sm_obj);
	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
	if (vd->vdev_obsolete_sm == NULL) {
		ASSERT0(obsolete_sm_obj);
		return (B_FALSE);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);

	ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));

	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);

	ASSERT3U(bytes_obsolete, <=, bytes_mapped);

	/*
	 * If a high percentage of the bytes that are mapped have become
	 * obsolete, condense (unless the mapping is already small enough).
	 * This has a good chance of reducing the amount of memory used
	 * by the mapping.
	 */
	if (bytes_obsolete * 100 / bytes_mapped >=
	    zfs_indirect_condense_obsolete_pct &&
	    mapping_size > zfs_condense_min_mapping_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete "
		    "spacemap covers %d%% of %lluMB mapping",
		    (u_longlong_t)vd->vdev_id,
		    (int)(bytes_obsolete * 100 / bytes_mapped),
		    (u_longlong_t)bytes_mapped / 1024 / 1024);
		return (B_TRUE);
	}

	/*
	 * If the obsolete space map takes up too much space on disk,
	 * condense in order to free up this disk space.
	 */
	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
		    "length %lluMB >= max size %lluMB",
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
		    1024 / 1024);
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This sync task completes (finishes) a condense, deleting the old
 * mapping and replacing it with the new one.
 */
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_meta_objset;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
	uint64_t new_count =
	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}
	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);

	/*
	 * Reset vdev_indirect_mapping to refer to the new object.
	 */
	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = sci->sci_new_mapping;
	rw_exit(&vd->vdev_indirect_rwlock);

	sci->sci_new_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = scip->scip_next_mapping_object;
	scip->scip_next_mapping_object = 0;

	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	scip->scip_prev_obsolete_sm_object = 0;

	scip->scip_vdev = 0;

	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, tx));
	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
	spa->spa_condensing_indirect = NULL;

	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
	    "new mapping object %llu has %llu entries "
	    "(was %llu entries)",
	    vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
	    new_count, old_count);

	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * This sync task appends entries to the new mapping object.
 */
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	ASSERTV(spa_t *spa = dmu_tx_pool(tx)->dp_spa);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);

	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}

/*
 * Open-context function to add one entry to the new mapping. The new
 * entry will be remembered and written from syncing context.
 */
static void
spa_condense_indirect_commit_entry(spa_t *spa,
    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;

	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * If we are the first entry committed this txg, kick off the sync
	 * task to write to the MOS on our behalf.
	 */
	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    spa_condense_indirect_commit_sync, sci,
		    0, ZFS_SPACE_CHECK_NONE, tx);
	}

	vdev_indirect_mapping_entry_t *vime =
	    kmem_alloc(sizeof (*vime), KM_SLEEP);
	vime->vime_mapping = *vimep;
	vime->vime_obsolete_count = count;
	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);

	dmu_tx_commit(tx);
}
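
/*
 * Added note: the two functions above form the usual open-context to
 * syncing-context handoff. spa_condense_indirect_commit_entry() runs in
 * open context, assigns a tx, and queues the entry on the per-txg list;
 * the first entry queued in a txg registers
 * spa_condense_indirect_commit_sync(), which later drains that txg's
 * list into the new mapping object from syncing context.
 */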

static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
    uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t mapi = start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_num_entries =
	    vdev_indirect_mapping_num_entries(old_mapping);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);

	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
	    (u_longlong_t)vd->vdev_id,
	    (u_longlong_t)mapi);

	while (mapi < old_num_entries) {

		if (zthr_iscancelled(zthr)) {
			zfs_dbgmsg("pausing condense of vdev %llu "
			    "at index %llu", (u_longlong_t)vd->vdev_id,
			    (u_longlong_t)mapi);
			break;
		}

		vdev_indirect_mapping_entry_phys_t *entry =
		    &old_mapping->vim_entries[mapi];
		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
		if (obsolete_counts[mapi] < entry_size) {
			spa_condense_indirect_commit_entry(spa, entry,
			    obsolete_counts[mapi]);

			/*
			 * This delay may be requested for testing, debugging,
			 * or performance reasons.
			 */
			hrtime_t now = gethrtime();
			hrtime_t sleep_until = now + MSEC2NSEC(
			    zfs_condense_indirect_commit_entry_delay_ms);
			zfs_sleep_until(sleep_until);
		}

		mapi++;
	}
}

/* ARGSUSED */
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
	spa_t *spa = arg;

	return (spa->spa_condensing_indirect != NULL);
}

/* ARGSUSED */
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
	spa_t *spa = arg;
	vdev_t *vd;

	ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
	ASSERT3P(vd, !=, NULL);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	uint32_t *counts;
	uint64_t start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	space_map_t *prev_obsolete_sm = NULL;

	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * The list must start out empty in order for the
		 * _commit_sync() sync task to be properly registered
		 * on the first call to _commit_entry(); so it's wise
		 * to double check and ensure we actually are starting
		 * with empty lists.
		 */
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}

	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
	if (prev_obsolete_sm != NULL) {
		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
		    counts, prev_obsolete_sm);
	}
	space_map_close(prev_obsolete_sm);

	/*
	 * Generate new mapping. Determine what index to continue from
	 * based on the max offset that we've already written in the
	 * new mapping.
	 */
	uint64_t max_offset =
	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
	if (max_offset == 0) {
		/* We haven't written anything to the new mapping yet. */
		start_index = 0;
	} else {
		/*
		 * Pick up from where we left off. _entry_for_offset()
		 * returns a pointer into the vim_entries array. If
		 * max_offset is greater than any of the mappings
		 * contained in the table, NULL will be returned and
		 * that indicates we've exhausted our iteration of the
		 * old_mapping.
		 */

		vdev_indirect_mapping_entry_phys_t *entry =
		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
		    max_offset);

		if (entry == NULL) {
			/*
			 * We've already written the whole new mapping.
			 * This special value will cause us to skip the
			 * generate_new_mapping step and just do the sync
			 * task to complete the condense.
			 */
			start_index = UINT64_MAX;
		} else {
			start_index = entry - old_mapping->vim_entries;
			ASSERT3U(start_index, <,
			    vdev_indirect_mapping_num_entries(old_mapping));
		}
	}

	spa_condense_indirect_generate_new_mapping(vd, counts,
	    start_index, zthr);

	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);

	/*
	 * If the zthr has received a cancellation signal while running
	 * in generate_new_mapping() or at any point after that, then bail
	 * early. We don't want to complete the condense if the spa is
	 * shutting down.
	 */
	if (zthr_iscancelled(zthr))
		return;

	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
	    spa_condense_indirect_complete_sync, sci, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

/*
 * Sync task to begin the condensing process.
 */
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;

	ASSERT0(scip->scip_next_mapping_object);
	ASSERT0(scip->scip_prev_obsolete_sm_object);
	ASSERT0(scip->scip_vdev);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));

	uint64_t obsolete_sm_obj;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
	ASSERT3U(obsolete_sm_obj, !=, 0);

	scip->scip_vdev = vd->vdev_id;
	scip->scip_next_mapping_object =
	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);

	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;

	/*
	 * We don't need to allocate a new space map object, since
	 * vdev_indirect_sync_obsolete will allocate one when needed.
	 */
	space_map_close(vd->vdev_obsolete_sm);
	vd->vdev_obsolete_sm = NULL;
	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));

	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (*scip) / sizeof (uint64_t), scip, tx));

	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);

	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
	    "posm=%llu nm=%llu",
	    vd->vdev_id, dmu_tx_get_txg(tx),
	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
	    (u_longlong_t)scip->scip_next_mapping_object);

	zthr_wakeup(spa->spa_condense_zthr);
}

/*
 * Sync to the given vdev's obsolete space map any segments that are no longer
 * referenced as of the given txg.
 *
 * If the obsolete space map doesn't exist yet, create and open it.
 */
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);

	ASSERT3U(vic->vic_mapping_object, !=, 0);
	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));

	uint64_t obsolete_sm_object;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object == 0) {
		obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
		    vdev_standard_sm_blksz, tx);

		ASSERT(vd->vdev_top_zap != 0);
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
		ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
		ASSERT3U(obsolete_sm_object, !=, 0);

		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
		    spa->spa_meta_objset, obsolete_sm_object,
		    0, vd->vdev_asize, 0));
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);
	ASSERT3U(obsolete_sm_object, ==,
	    space_map_object(vd->vdev_obsolete_sm));

	space_map_write(vd->vdev_obsolete_sm,
	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}

int
spa_condense_init(spa_t *spa)
{
	int error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
	    &spa->spa_condensing_indirect_phys);
	if (error == 0) {
		if (spa_writeable(spa)) {
			spa->spa_condensing_indirect =
			    spa_condensing_indirect_create(spa);
		}
		return (0);
	} else if (error == ENOENT) {
		return (0);
	} else {
		return (error);
	}
}

void
spa_condense_fini(spa_t *spa)
{
	if (spa->spa_condensing_indirect != NULL) {
		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
		spa->spa_condensing_indirect = NULL;
	}
}

void
spa_start_indirect_condensing_thread(spa_t *spa)
{
	ASSERT3P(spa->spa_condense_zthr, ==, NULL);
	spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check,
	    spa_condense_indirect_thread, spa);
}

/*
 * Gets the obsolete spacemap object from the vdev's ZAP. On success,
 * sm_obj will contain either the obsolete spacemap object or zero if
 * none exists. All other errors are returned to the caller.
 */
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_top_zap == 0) {
		*sm_obj = 0;
		return (0);
	}

	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
	if (error == ENOENT) {
		*sm_obj = 0;
		error = 0;
	}

	return (error);
}
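
/*
 * Hedged usage sketch (added note): callers treat a missing ZAP entry as
 * "no obsolete space map yet", so the common pattern in this file is:
 *
 *	uint64_t obsolete_sm_object;
 *	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
 *	if (obsolete_sm_object == 0)
 *		... no obsolete space map; create one if needed ...
 */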

/*
 * Gets the "obsolete counts are precise" value from the vdev's ZAP.
 * On success, are_precise will be set to reflect whether the counts are
 * precise. All other errors are returned to the caller.
 */
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_top_zap == 0) {
		*are_precise = B_FALSE;
		return (0);
	}

	uint64_t val = 0;
	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
	if (error == 0) {
		*are_precise = (val != 0);
	} else if (error == ENOENT) {
		*are_precise = B_FALSE;
		error = 0;
	}

	return (error);
}

/* ARGSUSED */
static void
vdev_indirect_close(vdev_t *vd)
{
}

/* ARGSUSED */
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	*psize = *max_psize = vd->vdev_asize +
	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*ashift = vd->vdev_ashift;
	return (0);
}

typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
	rs->rs_vd = vd;
	rs->rs_offset = offset;
	rs->rs_asize = asize;
	rs->rs_split_offset = split_offset;
	return (rs);
}

/*
 * Given an indirect vdev and an extent on that vdev, this function
 * duplicates the physical entries of the indirect mapping that correspond
 * to the extent to a new array and returns a pointer to it. In addition,
 * copied_entries is populated with the number of mapping entries that were
 * duplicated.
 *
 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
 * This ensures that the mapping won't change due to condensing as we
 * copy over its contents.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t entries = 0;

	ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));

	vdev_indirect_mapping_entry_phys_t *first_mapping =
	    vdev_indirect_mapping_entry_for_offset(vim, offset);
	ASSERT3P(first_mapping, !=, NULL);

	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
	while (asize > 0) {
		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);

		ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
		ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);

		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
		uint64_t inner_size = MIN(asize, size - inner_offset);

		offset += inner_size;
		asize -= inner_size;
		entries++;
		m++;
	}

	size_t copy_length = entries * sizeof (*first_mapping);
	duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
	bcopy(first_mapping, duplicate_mappings, copy_length);
	*copied_entries = entries;

	return (duplicate_mappings);
}

/*
 * Goes through the relevant indirect mappings until it hits a concrete vdev
 * and issues the callback. On the way to the concrete vdev, if any other
 * indirect vdevs are encountered, then the callback will also be called on
 * each of those indirect vdevs. For example, if the segment is mapped to
 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
 * mapped to segment B on concrete vdev 2, then the callback will be called on
 * both vdev 1 and vdev 2.
 *
 * While the callback passed to vdev_indirect_remap() is called on every vdev
 * the function encounters, certain callbacks only care about concrete vdevs.
 * These types of callbacks should return immediately and explicitly when they
 * are called on an indirect vdev.
 *
 * Because there is a possibility that a DVA section in the indirect device
 * has been split into multiple sections in our mapping, we keep track
 * of the relevant contiguous segments of the new location (remap_segment_t)
 * in a stack. This way we can call the callback for each of the new sections
 * created by a single section of the indirect device. Note though, that in
 * this scenario the callbacks in each split block won't occur in-order in
 * terms of offset, so callers should not make any assumptions about that.
 *
 * For callbacks that don't handle split blocks and immediately return when
 * they encounter them (as is the case for remap_blkptr_cb), the caller can
 * assume that its callback will be applied from the first indirect vdev
 * encountered to the last one and then the concrete vdev, in that order.
 */
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
    void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
	list_t stack;
	spa_t *spa = vd->vdev_spa;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
	    rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;

		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
		ASSERT(rs->rs_asize > 0);

		/*
		 * Note: As this function can be called from open context
		 * (e.g. zio_read()), we need the following rwlock to
		 * prevent the mapping from being changed by condensing.
		 *
		 * So we grab the lock and we make a copy of the entries
		 * that are relevant to the extent that we are working on.
		 * Once that is done, we drop the lock and iterate over
		 * our copy of the mapping. Once we are done with the
		 * remap segment and we free it, we also free our copy
		 * of the indirect mapping entries that are relevant to it.
		 *
		 * This way we don't need to wait until the function is
		 * finished with a segment, to condense it. In addition, we
		 * don't need a recursive rwlock for the case that a call to
		 * vdev_indirect_remap() needs to call itself (through the
		 * codepath of its callback) for the same vdev in the middle
		 * of its execution.
		 */
		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
		ASSERT3P(v->vdev_indirect_mapping, !=, NULL);

		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);
		ASSERT3P(mapping, !=, NULL);
		ASSERT3U(num_entries, >, 0);
		rw_exit(&v->vdev_indirect_rwlock);

		for (uint64_t i = 0; i < num_entries; i++) {
			/*
			 * Note: the vdev_indirect_mapping can not change
			 * while we are running. It only changes while the
			 * removal is in progress, and then only from syncing
			 * context. While a removal is in progress, this
			 * function is only called for frees, which also only
			 * happen from syncing context.
			 */
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];

			ASSERT3P(m, !=, NULL);
			ASSERT3U(rs->rs_asize, >, 0);

			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);

			ASSERT3U(rs->rs_offset, >=,
			    DVA_MAPPING_GET_SRC_OFFSET(m));
			ASSERT3U(rs->rs_offset, <,
			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
			ASSERT3U(dst_vdev, !=, v->vdev_id);

			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);

			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
			ASSERT3P(dst_v, !=, NULL);

			if (dst_v->vdev_ops == &vdev_indirect_ops) {
				list_insert_head(&stack,
				    rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset));

			}

			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
				/*
				 * Note: This clause exists solely for
				 * testing purposes. We use it to ensure that
				 * split blocks work and that the callbacks
				 * using them yield the same result if issued
				 * in reverse order.
				 */
				uint64_t inner_half = inner_size / 2;

				func(rs->rs_split_offset + inner_half, dst_v,
				    dst_offset + inner_offset + inner_half,
				    inner_half, arg);

				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_half, arg);
			} else {
				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_size, arg);
			}

			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
		}
		VERIFY0(rs->rs_asize);

		kmem_free(mapping, num_entries * sizeof (*mapping));
		kmem_free(rs, sizeof (remap_segment_t));
	}
	list_destroy(&stack);
}
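
/*
 * Hedged sketch (added, not upstream code): a minimal vdev_indirect_remap()
 * callback that counts the concrete segments a DVA maps to, illustrating
 * the contract described above. The helper name and usage are hypothetical;
 * shown for illustration only.
 */
#if 0
static void
example_count_segments_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	uint64_t *nsegs = arg;

	/*
	 * Per the contract, this may also be invoked on intermediate
	 * indirect vdevs; only count concrete ones.
	 */
	if (vd->vdev_ops != &vdev_indirect_ops)
		(*nsegs)++;
}

/* usage: vdev_indirect_remap(vd, offset, asize, example_count_segments_cb,
 *	&nsegs); */
#endif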

static void
vdev_indirect_child_io_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;

	mutex_enter(&pio->io_lock);
	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
	mutex_exit(&pio->io_lock);

	abd_put(zio->io_abd);
}

/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	zio_t *zio = arg;
	indirect_vsd_t *iv = zio->io_vsd;

	ASSERT3P(vd, !=, NULL);

	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	int n = 1;
	if (vd->vdev_ops == &vdev_mirror_ops)
		n = vd->vdev_children;

	indirect_split_t *is =
	    kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);

	is->is_children = n;
	is->is_size = size;
	is->is_split_offset = split_offset;
	is->is_target_offset = offset;
	is->is_vdev = vd;
	list_create(&is->is_unique_child, sizeof (indirect_child_t),
	    offsetof(indirect_child_t, ic_node));

	/*
	 * Note that we only consider multiple copies of the data for
	 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
	 * though they use the same ops as mirror, because there's only one
	 * "good" copy under the replacing/spare.
	 */
	if (vd->vdev_ops == &vdev_mirror_ops) {
		for (int i = 0; i < n; i++) {
			is->is_child[i].ic_vdev = vd->vdev_child[i];
			list_link_init(&is->is_child[i].ic_node);
		}
	} else {
		is->is_child[0].ic_vdev = vd;
	}

	list_insert_tail(&iv->iv_splits, is);
}

static void
vdev_indirect_read_split_done(zio_t *zio)
{
	indirect_child_t *ic = zio->io_private;

	if (zio->io_error != 0) {
		/*
		 * Clear ic_data to indicate that we do not have data for this
		 * child.
		 */
		abd_free(ic->ic_data);
		ic->ic_data = NULL;
	}
}

/*
 * Issue reads for all copies (mirror children) of all splits.
 */
static void
vdev_indirect_read_all(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];

			if (!vdev_readable(ic->ic_vdev))
				continue;

			/*
			 * Note, we may read from a child whose DTL
			 * indicates that the data may not be present here.
			 * While this might result in a few i/os that will
			 * likely return incorrect data, it simplifies the
			 * code since we can treat scrub and resilver
			 * identically. (The incorrect data will be
			 * detected and ignored when we verify the
			 * checksum.)
			 */

			ic->ic_data = abd_alloc_sametype(zio->io_abd,
			    is->is_size);
			ic->ic_duplicate = NULL;

			zio_nowait(zio_vdev_child_io(zio, NULL,
			    ic->ic_vdev, is->is_target_offset, ic->ic_data,
			    is->is_size, zio->io_type, zio->io_priority, 0,
			    vdev_indirect_read_split_done, ic));
		}
	}
	iv->iv_reconstruct = B_TRUE;
}
1295 | ||
1296 | static void | |
1297 | vdev_indirect_io_start(zio_t *zio) | |
1298 | { | |
1299 | ASSERTV(spa_t *spa = zio->io_spa); | |
9e052db4 MA |
1300 | indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP); |
1301 | list_create(&iv->iv_splits, | |
1302 | sizeof (indirect_split_t), offsetof(indirect_split_t, is_node)); | |
1303 | ||
1304 | zio->io_vsd = iv; | |
1305 | zio->io_vsd_ops = &vdev_indirect_vsd_ops; | |
a1d477c2 MA |
1306 | |
1307 | ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); | |
1308 | if (zio->io_type != ZIO_TYPE_READ) { | |
1309 | ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); | |
9e052db4 MA |
1310 | /* |
1311 | * Note: this code can handle other kinds of writes, | |
1312 | * but we don't expect them. | |
1313 | */ | |
1314 | ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL | | |
1315 | ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0); | |
a1d477c2 MA |
1316 | } |
1317 | ||
1318 | vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size, | |
9e052db4 MA |
1319 | vdev_indirect_gather_splits, zio); |
1320 | ||
1321 | indirect_split_t *first = list_head(&iv->iv_splits); | |
1322 | if (first->is_size == zio->io_size) { | |
1323 | /* | |
1324 | * This is not a split block; we are pointing to the entire | |
1325 | * data, which will checksum the same as the original data. | |
1326 | * Pass the BP down so that the child i/o can verify the | |
1327 | * checksum, and try a different location if available | |
1328 | * (e.g. on a mirror). | |
1329 | * | |
1330 | * While this special case could be handled the same as the | |
1331 | * general (split block) case, doing it this way ensures | |
1332 | * that the vast majority of blocks on indirect vdevs | |
1333 | * (which are not split) are handled identically to blocks | |
1334 | * on non-indirect vdevs. This allows us to be less strict | |
1335 | * about performance in the general (but rare) split-block case. | |
1336 | */ | |
1337 | ASSERT0(first->is_split_offset); | |
1338 | ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL); | |
1339 | zio_nowait(zio_vdev_child_io(zio, zio->io_bp, | |
1340 | first->is_vdev, first->is_target_offset, | |
1341 | abd_get_offset(zio->io_abd, 0), | |
1342 | zio->io_size, zio->io_type, zio->io_priority, 0, | |
1343 | vdev_indirect_child_io_done, zio)); | |
1344 | } else { | |
1345 | iv->iv_split_block = B_TRUE; | |
5aa95ba0 TC |
1346 | if (zio->io_type == ZIO_TYPE_READ && |
1347 | zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) { | |
9e052db4 MA |
1348 | /* |
1349 | * Read all copies. Note that for simplicity, | |
1350 | * we don't bother consulting the DTL in the | |
1351 | * resilver case. | |
1352 | */ | |
1353 | vdev_indirect_read_all(zio); | |
1354 | } else { | |
1355 | /* | |
5aa95ba0 TC |
1356 | * If this is a read zio, we read one copy of each |
1357 | * split segment, from the top-level vdev. Since | |
1358 | * we don't know the checksum of each split | |
1359 | * individually, the child zio can't ensure that | |
1360 | * we get the right data. E.g. if it's a mirror, | |
1361 | * it will just read from a random (healthy) leaf | |
1362 | * vdev. We have to verify the checksum in | |
1363 | * vdev_indirect_io_done(). | |
1364 | * | |
1365 | * For write zios, the vdev code will ensure we write | |
1366 | * to all children. | |
9e052db4 MA |
1367 | */ |
1368 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1369 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1370 | zio_nowait(zio_vdev_child_io(zio, NULL, | |
1371 | is->is_vdev, is->is_target_offset, | |
1372 | abd_get_offset(zio->io_abd, | |
1373 | is->is_split_offset), is->is_size, | |
1374 | zio->io_type, zio->io_priority, 0, | |
1375 | vdev_indirect_child_io_done, zio)); | |
1376 | } | |
1377 | ||
1378 | } | |
1379 | } | |
a1d477c2 MA |
1380 | |
1381 | zio_execute(zio); | |
1382 | } | |
1383 | ||
9e052db4 MA |
1384 | /* |
1385 | * Report a checksum error for a child. | |
1386 | */ | |
1387 | static void | |
1388 | vdev_indirect_checksum_error(zio_t *zio, | |
1389 | indirect_split_t *is, indirect_child_t *ic) | |
1390 | { | |
1391 | vdev_t *vd = ic->ic_vdev; | |
1392 | ||
1393 | if (zio->io_flags & ZIO_FLAG_SPECULATIVE) | |
1394 | return; | |
1395 | ||
1396 | mutex_enter(&vd->vdev_stat_lock); | |
1397 | vd->vdev_stat.vs_checksum_errors++; | |
1398 | mutex_exit(&vd->vdev_stat_lock); | |
1399 | ||
1400 | zio_bad_cksum_t zbc = {{{ 0 }}}; | |
1401 | abd_t *bad_abd = ic->ic_data; | |
1258bd77 | 1402 | abd_t *good_abd = is->is_good_child->ic_data; |
9e052db4 MA |
1403 | zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio, |
1404 | is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc); | |
1405 | } | |
1406 | ||
1407 | /* | |
1408 | * Issue repair i/os for any incorrect copies. We do this by comparing | |
1409 | * each split segment's correct data (is_good_child's ic_data) with each | |
1410 | * other copy of the data. If they differ, then we overwrite the bad data | |
1411 | * with the good copy. Note that we do this without regard for the DTL's, | |
1412 | * which simplifies this code and also issues the optimal number of writes | |
1413 | * (based on which copies actually read bad data, as opposed to which we | |
1414 | * think might be wrong). For the same reason, we always use | |
1415 | * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start(). | |
1416 | */ | |
1417 | static void | |
1418 | vdev_indirect_repair(zio_t *zio) | |
1419 | { | |
1420 | indirect_vsd_t *iv = zio->io_vsd; | |
1421 | ||
1422 | enum zio_flag flags = ZIO_FLAG_IO_REPAIR; | |
1423 | ||
1424 | if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) | |
1425 | flags |= ZIO_FLAG_SELF_HEAL; | |
1426 | ||
1427 | if (!spa_writeable(zio->io_spa)) | |
1428 | return; | |
1429 | ||
1430 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1431 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
9e052db4 MA |
1432 | for (int c = 0; c < is->is_children; c++) { |
1433 | indirect_child_t *ic = &is->is_child[c]; | |
1258bd77 | 1434 | if (ic == is->is_good_child) |
9e052db4 MA |
1435 | continue; |
1436 | if (ic->ic_data == NULL) | |
1437 | continue; | |
4589f3ae | 1438 | if (ic->ic_duplicate == is->is_good_child) |
9e052db4 MA |
1439 | continue; |
1440 | ||
1441 | zio_nowait(zio_vdev_child_io(zio, NULL, | |
1442 | ic->ic_vdev, is->is_target_offset, | |
1258bd77 | 1443 | is->is_good_child->ic_data, is->is_size, |
9e052db4 MA |
1444 | ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE, |
1445 | ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL, | |
1446 | NULL, NULL)); | |
1447 | ||
1448 | vdev_indirect_checksum_error(zio, is, ic); | |
1449 | } | |
1450 | } | |
1451 | } | |
1452 | ||
1453 | /* | |
1454 | * Report checksum errors on all children that we read from. | |
1455 | */ | |
1456 | static void | |
1457 | vdev_indirect_all_checksum_errors(zio_t *zio) | |
1458 | { | |
1459 | indirect_vsd_t *iv = zio->io_vsd; | |
1460 | ||
1461 | if (zio->io_flags & ZIO_FLAG_SPECULATIVE) | |
1462 | return; | |
1463 | ||
1464 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1465 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1466 | for (int c = 0; c < is->is_children; c++) { | |
1467 | indirect_child_t *ic = &is->is_child[c]; | |
1468 | ||
1469 | if (ic->ic_data == NULL) | |
1470 | continue; | |
1471 | ||
1472 | vdev_t *vd = ic->ic_vdev; | |
1473 | ||
1474 | mutex_enter(&vd->vdev_stat_lock); | |
1475 | vd->vdev_stat.vs_checksum_errors++; | |
1476 | mutex_exit(&vd->vdev_stat_lock); | |
1477 | ||
1478 | zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio, | |
1479 | is->is_target_offset, is->is_size, | |
1480 | NULL, NULL, NULL); | |
1481 | } | |
1482 | } | |
1483 | } | |
1484 | ||
1258bd77 BB |
1485 | /* |
1486 | * Copy data from all the splits into the main zio, then validate the | |
1487 | * checksum. If the checksum validates successfully, return success. | |
1488 | */ | |
1489 | static int | |
1490 | vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio) | |
1491 | { | |
1492 | zio_bad_cksum_t zbc; | |
1493 | ||
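	/*
	 * Reassemble a candidate block: copy each split's currently
	 * selected copy (is_good_child) into the parent zio's abd at
	 * that split's offset.
	 */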
1494 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1495 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1496 | ||
1497 | ASSERT3P(is->is_good_child->ic_data, !=, NULL); | |
1498 | ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL); | |
1499 | ||
1500 | abd_copy_off(zio->io_abd, is->is_good_child->ic_data, | |
1501 | is->is_split_offset, 0, is->is_size); | |
1502 | } | |
1503 | ||
1504 | return (zio_checksum_error(zio, &zbc)); | |
1505 | } | |
1506 | ||
1507 | /* | |
1508 | * There are relatively few possible combinations making it feasible to | |
1509 | * deterministically check them all. We do this by advancing good_child | |
1510 | * to the split's next unique version. When the end of that list is | |
1511 | * reached we reset it and "carry over" into the next split, like | |
1512 | * mixed-radix counting where each digit's base is is_unique_children. | |
1513 | */ | |
1514 | static int | |
1515 | vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio) | |
1516 | { | |
1517 | boolean_t more = B_TRUE; | |
1518 | ||
1519 | iv->iv_attempts = 0; | |
1520 | ||
1521 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1522 | is != NULL; is = list_next(&iv->iv_splits, is)) | |
1523 | is->is_good_child = list_head(&is->is_unique_child); | |
1524 | ||
1525 | while (more == B_TRUE) { | |
1526 | iv->iv_attempts++; | |
1527 | more = B_FALSE; | |
1528 | ||
1529 | if (vdev_indirect_splits_checksum_validate(iv, zio) == 0) | |
1530 | return (0); | |
1531 | ||
1532 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1533 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1534 | is->is_good_child = list_next(&is->is_unique_child, | |
1535 | is->is_good_child); | |
1536 | if (is->is_good_child != NULL) { | |
1537 | more = B_TRUE; | |
1538 | break; | |
1539 | } | |
1540 | ||
1541 | is->is_good_child = list_head(&is->is_unique_child); | |
1542 | } | |
1543 | } | |
1544 | ||
1545 | ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations); | |
1546 | ||
1547 | return (SET_ERROR(ECKSUM)); | |
1548 | } | |
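/*
 * A minimal user-space sketch of the mixed-radix "odometer" walk above,
 * assuming digit[i] selects among base[i] unique versions of split i.
 * The names here are illustrative only and are not part of this file.
 */
#if 0
static boolean_t
odometer_advance(int *digit, const int *base, int ndigits)
{
	for (int i = 0; i < ndigits; i++) {
		if (++digit[i] < base[i])
			return (B_TRUE);	/* a new combination to try */
		digit[i] = 0;		/* wrap this digit; carry over */
	}
	return (B_FALSE);	/* every combination has been visited */
}
#endif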
1549 | ||
1550 | /* | |
1551 | * There are too many combinations to try all of them in a reasonable amount | |
1552 | * of time. So try a fixed number of random combinations from the unique | |
1553 | * split versions, after which we'll consider the block unrecoverable. | |
1554 | */ | |
1555 | static int | |
1556 | vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio) | |
1557 | { | |
1558 | iv->iv_attempts = 0; | |
1559 | ||
1560 | while (iv->iv_attempts < iv->iv_attempts_max) { | |
1561 | iv->iv_attempts++; | |
1562 | ||
1563 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1564 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1565 | indirect_child_t *ic = list_head(&is->is_unique_child); | |
1566 | int children = is->is_unique_children; | |
1567 | ||
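			/* Advance a uniformly random number of steps. */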
1568 | for (int i = spa_get_random(children); i > 0; i--) | |
1569 | ic = list_next(&is->is_unique_child, ic); | |
1570 | ||
1571 | ASSERT3P(ic, !=, NULL); | |
1572 | is->is_good_child = ic; | |
1573 | } | |
1574 | ||
1575 | if (vdev_indirect_splits_checksum_validate(iv, zio) == 0) | |
1576 | return (0); | |
1577 | } | |
1578 | ||
1579 | return (SET_ERROR(ECKSUM)); | |
1580 | } | |
1581 | ||
1582 | /* | |
1583 | * This is a validation function for reconstruction. It randomly selects | |
1584 | * a good combination, if one can be found, and then it intentionally | |
1585 | * damages all other segment copies by zeroing them. This forces the | |
1586 | * reconstruction algorithm to locate the one remaining known good copy. | |
1587 | */ | |
1588 | static int | |
1589 | vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio) | |
1590 | { | |
20eb30d0 TC |
1591 | int error; |
1592 | ||
1258bd77 BB |
1593 | /* Presume all the copies are unique for initial selection. */ |
1594 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1595 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1596 | is->is_unique_children = 0; | |
1597 | ||
1598 | for (int i = 0; i < is->is_children; i++) { | |
1599 | indirect_child_t *ic = &is->is_child[i]; | |
1600 | if (ic->ic_data != NULL) { | |
1601 | is->is_unique_children++; | |
1602 | list_insert_tail(&is->is_unique_child, ic); | |
1603 | } | |
1604 | } | |
20eb30d0 TC |
1605 | |
1606 | if (list_is_empty(&is->is_unique_child)) { | |
1607 | error = SET_ERROR(EIO); | |
1608 | goto out; | |
1609 | } | |
1258bd77 BB |
1610 | } |
1611 | ||
1612 | /* | |
1613 | * Set each is_good_child to a randomly-selected child which | |
1614 | * is known to contain validated data. | |
1615 | */ | |
20eb30d0 | 1616 | error = vdev_indirect_splits_enumerate_randomly(iv, zio); |
1258bd77 BB |
1617 | if (error) |
1618 | goto out; | |
1619 | ||
1620 | /* | |
1621 | * Damage all but the known good copy by zeroing it. This will | |
1622 | * result in at most two unique versions per indirect_split_t. | |
1623 | * Both may need to be checked in order to reconstruct the block. | |
1624 | * Set iv->iv_attempts_max such that all unique combinations will be | |
4a7eb69a | 1625 | * enumerated, but limit the damage to at most 12 indirect splits. |
1258bd77 BB |
1626 | */ |
1627 | iv->iv_attempts_max = 1; | |
1628 | ||
1629 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1630 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1631 | for (int c = 0; c < is->is_children; c++) { | |
1632 | indirect_child_t *ic = &is->is_child[c]; | |
1633 | ||
1634 | if (ic == is->is_good_child) | |
1635 | continue; | |
1636 | if (ic->ic_data == NULL) | |
1637 | continue; | |
1638 | ||
1639 | abd_zero(ic->ic_data, ic->ic_data->abd_size); | |
1640 | } | |
1641 | ||
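		/*
		 * After damage each split has at most two versions (the
		 * good copy and zeros), so the combination count doubles
		 * per split.
		 */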
1642 | iv->iv_attempts_max *= 2; | |
4a7eb69a | 1643 | if (iv->iv_attempts_max >= (1ULL << 12)) { |
1258bd77 BB |
1644 | iv->iv_attempts_max = UINT64_MAX; |
1645 | break; | |
1646 | } | |
1647 | } | |
1648 | ||
1649 | out: | |
1650 | /* Empty the unique children lists so they can be rebuilt later. */ | |
1651 | for (indirect_split_t *is = list_head(&iv->iv_splits); | |
1652 | is != NULL; is = list_next(&iv->iv_splits, is)) { | |
1653 | indirect_child_t *ic; | |
1654 | while ((ic = list_head(&is->is_unique_child)) != NULL) | |
1655 | list_remove(&is->is_unique_child, ic); | |
1656 | ||
1657 | is->is_unique_children = 0; | |
1658 | } | |
1659 | ||
1660 | return (error); | |
1661 | } | |
1662 | ||
9e052db4 MA |
1663 | /* |
1664 | * This function is called when we have read all copies of the data and need | |
1665 | * to try to find a combination of copies that gives us the right checksum. | |
1666 | * | |
1667 | * If we pointed to any mirror vdevs, this effectively does the job of the | |
1668 | * mirror. The mirror vdev code can't do its own job because we don't know | |
4589f3ae | 1669 | * the checksum of each split segment individually. |
9e052db4 | 1670 | * |
4589f3ae BB |
1671 | * We have to try every unique combination of copies of split segments, until |
1672 | * we find one that checksums correctly. Duplicate segment copies are first | |
1258bd77 BB |
1673 | * identified and later skipped during reconstruction. This optimization | |
1674 | * reduces the search space and ensures that of the remaining combinations | |
1675 | * at most one is correct. | |
4589f3ae BB |
1676 | * |
1677 | * When the total number of combinations is small they can all be checked. | |
1678 | * For example, if we have 3 segments in the split, and each points to a | |
1679 | * 2-way mirror with unique copies, we will have the following pieces of data: | |
9e052db4 MA |
1680 | * |
1681 | * | mirror child | |
1682 | * split | [0] [1] | |
1683 | * ======|===================== | |
1684 | * A | data_A_0 data_A_1 | |
1685 | * B | data_B_0 data_B_1 | |
1686 | * C | data_C_0 data_C_1 | |
1687 | * | |
1688 | * We will try the following (mirror children)^(number of splits) (2^3=8) | |
1689 | * combinations, which is similar to bitwise-little-endian counting in | |
1690 | * binary. In general each "digit" corresponds to a split segment, and the | |
1691 | * base of each digit is is_children, which can be different for each | |
1692 | * digit. | |
1693 | * | |
1694 | * "low bit" "high bit" | |
1695 | * v v | |
1696 | * data_A_0 data_B_0 data_C_0 | |
1697 | * data_A_1 data_B_0 data_C_0 | |
1698 | * data_A_0 data_B_1 data_C_0 | |
1699 | * data_A_1 data_B_1 data_C_0 | |
1700 | * data_A_0 data_B_0 data_C_1 | |
1701 | * data_A_1 data_B_0 data_C_1 | |
1702 | * data_A_0 data_B_1 data_C_1 | |
1703 | * data_A_1 data_B_1 data_C_1 | |
1704 | * | |
1705 | * Note that the split segments may be on the same or different top-level | |
1258bd77 BB |
1706 | * vdevs. In either case, we may need to try lots of combinations (see |
1707 | * zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror | |
1708 | * has small silent errors on all of its children, we can still reconstruct | |
1709 | * the correct data, as long as those errors are at sufficiently-separated | |
9e052db4 MA |
1710 | * offsets (specifically, separated by at least the largest block size, | |
1711 | * which defaults to 128KB but can be as large as 16MB). | |
1712 | */ | |
1713 | static void | |
1714 | vdev_indirect_reconstruct_io_done(zio_t *zio) | |
1715 | { | |
1716 | indirect_vsd_t *iv = zio->io_vsd; | |
1258bd77 BB |
1717 | boolean_t known_good = B_FALSE; |
1718 | int error; | |
1719 | ||
1720 | iv->iv_unique_combinations = 1; | |
1721 | iv->iv_attempts_max = UINT64_MAX; | |
4589f3ae BB |
1722 | |
1723 | if (zfs_reconstruct_indirect_combinations_max > 0) | |
1258bd77 BB |
1724 | iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max; |
1725 | ||
1726 | /* | |
1727 | * If nonzero, one in every zfs_reconstruct_indirect_damage_fraction | |
1728 | * blocks will be deliberately damaged, in order to validate | |
1729 | * reconstruction when there are split segments with damaged copies. | |
4a7eb69a | 1730 | * known_good will be TRUE when reconstruction is known to be possible. |
1258bd77 BB |
1730 | */ |
1731 | if (zfs_reconstruct_indirect_damage_fraction != 0 && | |
1732 | spa_get_random(zfs_reconstruct_indirect_damage_fraction) == 0) | |
1733 | known_good = (vdev_indirect_splits_damage(iv, zio) == 0); | |
9e052db4 | 1734 | |
4589f3ae | 1735 | /* |
1258bd77 BB |
1736 | * Determine the unique children for a split segment and add them |
1737 | * to the is_unique_child list. By restricting reconstruction | |
1738 | * to these children, only unique combinations will be considered. | |
1739 | * This can vastly reduce the search space when there are a large | |
1740 | * number of indirect splits. | |
4589f3ae | 1741 | */ |
9e052db4 | 1742 | for (indirect_split_t *is = list_head(&iv->iv_splits); |
4589f3ae | 1743 | is != NULL; is = list_next(&iv->iv_splits, is)) { |
1258bd77 | 1744 | is->is_unique_children = 0; |
4589f3ae BB |
1745 | |
1746 | for (int i = 0; i < is->is_children; i++) { | |
1258bd77 BB |
1747 | indirect_child_t *ic_i = &is->is_child[i]; |
1748 | ||
1749 | if (ic_i->ic_data == NULL || | |
1750 | ic_i->ic_duplicate != NULL) | |
4589f3ae BB |
1751 | continue; |
1752 | ||
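			/*
			 * Compare this copy with every later one; identical
			 * data is marked as a duplicate so that only one
			 * representative of each version is enumerated.
			 */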
1753 | for (int j = i + 1; j < is->is_children; j++) { | |
1258bd77 BB |
1754 | indirect_child_t *ic_j = &is->is_child[j]; |
1755 | ||
1756 | if (ic_j->ic_data == NULL || | |
1757 | ic_j->ic_duplicate != NULL) | |
4589f3ae BB |
1758 | continue; |
1759 | ||
1258bd77 BB |
1760 | if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0) |
1761 | ic_j->ic_duplicate = ic_i; | |
4589f3ae BB |
1762 | } |
1763 | ||
1258bd77 BB |
1764 | is->is_unique_children++; |
1765 | list_insert_tail(&is->is_unique_child, ic_i); | |
4589f3ae BB |
1766 | } |
1767 | ||
1258bd77 BB |
1768 | /* Reconstruction is impossible, no valid children */ |
1769 | EQUIV(list_is_empty(&is->is_unique_child), | |
1770 | is->is_unique_children == 0); | |
1771 | if (list_is_empty(&is->is_unique_child)) { | |
4589f3ae BB |
1772 | zio->io_error = EIO; |
1773 | vdev_indirect_all_checksum_errors(zio); | |
1774 | zio_checksum_verified(zio); | |
1775 | return; | |
1776 | } | |
1777 | ||
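		/* The search space is the product of all splits' unique copies. */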
1258bd77 | 1778 | iv->iv_unique_combinations *= is->is_unique_children; |
4589f3ae | 1779 | } |
9e052db4 | 1780 | |
1258bd77 BB |
1781 | if (iv->iv_unique_combinations <= iv->iv_attempts_max) |
1782 | error = vdev_indirect_splits_enumerate_all(iv, zio); | |
1783 | else | |
1784 | error = vdev_indirect_splits_enumerate_randomly(iv, zio); | |
9e052db4 | 1785 | |
1258bd77 BB |
1786 | if (error != 0) { |
1787 | /* All attempted combinations failed. */ | |
1788 | ASSERT3B(known_good, ==, B_FALSE); | |
1789 | zio->io_error = error; | |
1790 | vdev_indirect_all_checksum_errors(zio); | |
1791 | } else { | |
9e052db4 | 1792 | /* |
1258bd77 BB |
1793 | * The checksum has been successfully validated. Issue |
1794 | * repair I/Os to any copies of splits which don't match | |
1795 | * the validated version. | |
9e052db4 | 1796 | */ |
1258bd77 BB |
1797 | ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio)); |
1798 | vdev_indirect_repair(zio); | |
1799 | zio_checksum_verified(zio); | |
9e052db4 MA |
1800 | } |
1801 | } | |
1802 | ||
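/*
 * I/o completion for an indirect vdev.  Read path in brief: a non-split
 * block was fully handled by its single child zio; a split block is
 * checksummed here, and on failure we fall back to reading every copy
 * and reconstructing (see vdev_indirect_reconstruct_io_done()).
 */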
1803 | static void | |
1804 | vdev_indirect_io_done(zio_t *zio) | |
1805 | { | |
1806 | indirect_vsd_t *iv = zio->io_vsd; | |
1807 | ||
1808 | if (iv->iv_reconstruct) { | |
1809 | /* | |
1810 | * We have read all copies of the data (e.g. from mirrors), | |
1811 | * either because this was a scrub/resilver, or because the | |
1812 | * one-copy read didn't checksum correctly. | |
1813 | */ | |
1814 | vdev_indirect_reconstruct_io_done(zio); | |
1815 | return; | |
1816 | } | |
1817 | ||
1818 | if (!iv->iv_split_block) { | |
1819 | /* | |
1820 | * This was not a split block, so we passed the BP down, | |
1821 | * and the checksum was handled by the (one) child zio. | |
1822 | */ | |
1823 | return; | |
1824 | } | |
1825 | ||
1826 | zio_bad_cksum_t zbc; | |
1827 | int ret = zio_checksum_error(zio, &zbc); | |
1828 | if (ret == 0) { | |
1829 | zio_checksum_verified(zio); | |
1830 | return; | |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * The checksum didn't match. Read all copies of all splits, and | |
1835 | * then we will try to reconstruct. The next time | |
1836 | * vdev_indirect_io_done() is called, iv_reconstruct will be set. | |
1837 | */ | |
1838 | vdev_indirect_read_all(zio); | |
1839 | ||
1840 | zio_vdev_io_redone(zio); | |
1841 | } | |
1842 | ||
a1d477c2 MA |
1843 | vdev_ops_t vdev_indirect_ops = { |
1844 | vdev_indirect_open, | |
1845 | vdev_indirect_close, | |
1846 | vdev_default_asize, | |
1847 | vdev_indirect_io_start, | |
1848 | vdev_indirect_io_done, | |
1849 | NULL, /* vdev_op_state_change */ | |
1850 | NULL, /* vdev_op_need_resilver */ | |
1851 | NULL, /* vdev_op_hold */ | |
1852 | NULL, /* vdev_op_rele */ | |
1853 | vdev_indirect_remap, | |
619f0976 | 1854 | NULL, /* vdev_op_xlate */ |
a1d477c2 MA |
1855 | VDEV_TYPE_INDIRECT, /* name of this vdev type */ |
1856 | B_FALSE /* leaf vdev */ | |
1857 | }; | |
1858 | ||
93ce2b4c | 1859 | #if defined(_KERNEL) |
a1d477c2 MA |
1860 | EXPORT_SYMBOL(rs_alloc); |
1861 | EXPORT_SYMBOL(spa_condense_fini); | |
9d5b5245 | 1862 | EXPORT_SYMBOL(spa_start_indirect_condensing_thread); |
a1d477c2 MA |
1863 | EXPORT_SYMBOL(spa_condense_indirect_start_sync); |
1864 | EXPORT_SYMBOL(spa_condense_init); | |
1865 | EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete); | |
1866 | EXPORT_SYMBOL(vdev_indirect_mark_obsolete); | |
1867 | EXPORT_SYMBOL(vdev_indirect_should_condense); | |
1868 | EXPORT_SYMBOL(vdev_indirect_sync_obsolete); | |
1869 | EXPORT_SYMBOL(vdev_obsolete_counts_are_precise); | |
1870 | EXPORT_SYMBOL(vdev_obsolete_sm_object); | |
1871 | ||
0dc2f70c MA |
1872 | module_param(zfs_condense_indirect_vdevs_enable, int, 0644); |
1873 | MODULE_PARM_DESC(zfs_condense_indirect_vdevs_enable, | |
1874 | "Whether to attempt condensing indirect vdev mappings"); | |
1875 | ||
a1d477c2 MA |
1876 | /* CSTYLED */ |
1877 | module_param(zfs_condense_min_mapping_bytes, ulong, 0644); | |
1878 | MODULE_PARM_DESC(zfs_condense_min_mapping_bytes, | |
1879 | "Minimum size of vdev mapping to condense"); | |
1880 | ||
0dc2f70c MA |
1881 | /* CSTYLED */ |
1882 | module_param(zfs_condense_max_obsolete_bytes, ulong, 0644); | |
1883 | MODULE_PARM_DESC(zfs_condense_max_obsolete_bytes, | |
1884 | "Minimum size obsolete spacemap to attempt condensing"); | |
1885 | ||
a1d477c2 MA |
1886 | module_param(zfs_condense_indirect_commit_entry_delay_ms, int, 0644); |
1887 | MODULE_PARM_DESC(zfs_condense_indirect_commit_entry_delay_ms, | |
1888 | "Delay while condensing vdev mapping"); | |
9e052db4 | 1889 | |
4589f3ae BB |
1890 | module_param(zfs_reconstruct_indirect_combinations_max, int, 0644); |
1891 | MODULE_PARM_DESC(zfs_reconstruct_indirect_combinations_max, | |
1892 | "Maximum number of combinations when reconstructing split segments"); | |
a1d477c2 | 1893 | #endif |