1 /*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14 */
15
16 /*
17 * Copyright (c) 2014, 2020 by Delphix. All rights reserved.
18 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
20 */
21
22 #include <sys/zfs_context.h>
23 #include <sys/spa.h>
24 #include <sys/spa_impl.h>
25 #include <sys/vdev_impl.h>
26 #include <sys/fs/zfs.h>
27 #include <sys/zio.h>
28 #include <sys/zio_checksum.h>
29 #include <sys/metaslab.h>
30 #include <sys/dmu.h>
31 #include <sys/vdev_indirect_mapping.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/dsl_synctask.h>
34 #include <sys/zap.h>
35 #include <sys/abd.h>
36 #include <sys/zthr.h>
37
38 /*
39 * An indirect vdev corresponds to a vdev that has been removed. Since
40 * we cannot rewrite block pointers of snapshots, etc., we keep a
41 * mapping from old location on the removed device to the new location
42 * on another device in the pool and use this mapping whenever we need
43 * to access the DVA. Unfortunately, this mapping did not respect
44 * logical block boundaries when it was first created, and so a DVA on
45 * this indirect vdev may be "split" into multiple sections that each
46 * map to a different location. As a consequence, not all DVAs can be
47 * translated to an equivalent new DVA. Instead we must provide a
48 * "vdev_remap" operation that executes a callback on each contiguous
49 * segment of the new location. This function is used in multiple ways:
50 *
51 * - i/os to this vdev use the callback to determine where the
52 * data is now located, and issue child i/os for each segment's new
53 * location.
54 *
55 * - frees and claims to this vdev use the callback to free or claim
56 * each mapped segment. (Note that we don't actually need to claim
57 * log blocks on indirect vdevs, because we don't allocate to
58 * removing vdevs. However, zdb uses zio_claim() for its leak
59 * detection.)
60 */
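/*
 * Illustrative sketch (not part of this file's logic): the shape of a
 * "vdev_remap" callback and its invocation. The signature matches
 * vdev_indirect_remap() below; log_segment_cb and the surrounding call
 * are hypothetical, shown only to make the callback contract concrete.
 *
 *	static void
 *	log_segment_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
 *	    uint64_t size, void *arg)
 *	{
 *		zfs_dbgmsg("piece at split offset %llu maps to vdev %llu "
 *		    "offset %llu size %llu",
 *		    (u_longlong_t)split_offset, (u_longlong_t)vd->vdev_id,
 *		    (u_longlong_t)offset, (u_longlong_t)size);
 *	}
 *
 *	vdev_indirect_remap(vd, DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
 *	    log_segment_cb, NULL);
 */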
61
62 /*
63 * "Big theory statement" for how we mark blocks obsolete.
64 *
65 * When a block on an indirect vdev is freed or remapped, a section of
66 * that vdev's mapping may no longer be referenced (aka "obsolete"). We
67 * keep track of how much of each mapping entry is obsolete. When
68 * an entry becomes completely obsolete, we can remove it, thus reducing
69 * the memory used by the mapping. The complete picture of obsolescence
70 * is given by the following data structures, described below:
71 * - the entry-specific obsolete count
72 * - the vdev-specific obsolete spacemap
73 * - the pool-specific obsolete bpobj
74 *
75 * == On disk data structures used ==
76 *
77 * We track the obsolete space for the pool using several objects. Each
78 * of these objects is created on demand and freed when no longer
79 * needed, and is assumed to be empty if it does not exist.
80 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
81 *
82 * - Each vic_mapping_object (associated with an indirect vdev) can
83 * have a vimp_counts_object. This is an array of uint32_t's
84 * with the same number of entries as the vic_mapping_object. When
85 * the mapping is condensed, entries from the vic_obsolete_sm_object
86 * (see below) are folded into the counts. Therefore, each
87 * obsolete_counts entry tells us the number of bytes in the
88 * corresponding mapping entry that were not referenced when the
89 * mapping was last condensed.
90 *
91 * - Each indirect or removing vdev can have a vic_obsolete_sm_object.
92 * This is a space map containing an alloc entry for every DVA that
93 * has been obsoleted since the last time this indirect vdev was
94 * condensed. We use this object in order to improve performance
95 * when marking a DVA as obsolete. Instead of modifying an arbitrary
96 * offset of the vimp_counts_object, we only need to append an entry
97 * to the end of this object. When a DVA becomes obsolete, it is
98 * added to the obsolete space map. This happens when the DVA is
99 * freed, remapped and not referenced by a snapshot, or the last
100 * snapshot referencing it is destroyed.
101 *
102 * - Each dataset can have a ds_remap_deadlist object. This is a
103 * deadlist object containing all blocks that were remapped in this
104 * dataset but referenced in a previous snapshot. Blocks can *only*
105 * appear on this list if they were remapped (dsl_dataset_block_remapped);
106 * blocks that were killed in a head dataset are put on the normal
107 * ds_deadlist and marked obsolete when they are freed.
108 *
109 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks
110 * in the pool that need to be marked obsolete. When a snapshot is
111 * destroyed, we move some of the ds_remap_deadlist to the obsolete
112 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
113 * asynchronously process the obsolete bpobj, moving its entries to
114 * the specific vdevs' obsolete space maps.
115 *
116 * == Summary of how we mark blocks as obsolete ==
117 *
118 * - When freeing a block: if any DVA is on an indirect vdev, append to
119 * vic_obsolete_sm_object.
120 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
121 * references; otherwise append to vic_obsolete_sm_object).
122 * - When freeing a snapshot: move parts of ds_remap_deadlist to
123 * dp_obsolete_bpobj (same algorithm as ds_deadlist).
124 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
125 * individual vdev's vic_obsolete_sm_object.
126 */
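/*
 * Illustrative sketch (a simplification; the real free path lives in
 * the DSL and zio code, not in this file): how a freed DVA on an
 * indirect vdev would be marked obsolete using the helpers defined
 * below. The dva, spa, and tx variables are assumed from context.
 *
 *	uint64_t vdev_id = DVA_GET_VDEV(dva);
 *	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
 *	if (vd->vdev_ops == &vdev_indirect_ops) {
 *		spa_vdev_indirect_mark_obsolete(spa, vdev_id,
 *		    DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva), tx);
 *	}
 */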
127
128 /*
129 * "Big theory statement" for how we condense indirect vdevs.
130 *
131 * Condensing an indirect vdev's mapping is the process of determining
132 * the precise counts of obsolete space for each mapping entry (by
133 * integrating the obsolete spacemap into the obsolete counts) and
134 * writing out a new mapping that contains only referenced entries.
135 *
136 * We condense a vdev when we expect the mapping to shrink (see
137 * vdev_indirect_should_condense()), but only perform one condense at a
138 * time to limit the memory usage. In addition, we use a separate
139 * open-context thread (spa_condense_indirect_thread) to incrementally
140 * create the new mapping object in a way that minimizes the impact on
141 * the rest of the system.
142 *
143 * == Generating a new mapping ==
144 *
145 * To generate a new mapping, we follow these steps:
146 *
147 * 1. Save the old obsolete space map and create a new mapping object
148 * (see spa_condense_indirect_start_sync()). This initializes the
149 * spa_condensing_indirect_phys with the "previous obsolete space map",
150 * which is now read only. Newly obsolete DVAs will be added to a
151 * new (initially empty) obsolete space map, and will not be
152 * considered as part of this condense operation.
153 *
154 * 2. Construct in memory the precise counts of obsolete space for each
155 * mapping entry, by incorporating the obsolete space map into the
156 * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
157 *
158 * 3. Iterate through each mapping entry, writing to the new mapping any
159 * entries that are not completely obsolete (i.e. which don't have
160 * obsolete count == mapping length). (See
161 * spa_condense_indirect_generate_new_mapping().)
162 *
163 * 4. Destroy the old mapping object and switch over to the new one
164 * (spa_condense_indirect_complete_sync).
165 *
166 * == Restarting from failure ==
167 *
168 * To restart the condense when we import/open the pool, we must start
169 * at the 2nd step above: reconstruct the precise counts in memory,
170 * based on the space map + counts. Then in the 3rd step, we start
171 * iterating where we left off: at vimp_max_offset of the new mapping
172 * object.
173 */
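/*
 * Illustrative sketch (condensed from vdev_indirect_should_condense()
 * below): the two conditions that trigger a condense, expressed in
 * terms of the tunables defined next.
 *
 *	uint64_t mapped = vdev_indirect_mapping_bytes_mapped(vim);
 *	uint64_t obsolete = space_map_allocated(vd->vdev_obsolete_sm);
 *
 *	condense if:
 *	    (obsolete * 100 / mapped >= zfs_condense_indirect_obsolete_pct &&
 *	    vdev_indirect_mapping_size(vim) > zfs_condense_min_mapping_bytes)
 *	    or
 *	    space_map_length(vd->vdev_obsolete_sm) >=
 *	    zfs_condense_max_obsolete_bytes
 */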
174
175 static int zfs_condense_indirect_vdevs_enable = B_TRUE;
176
177 /*
178 * Condense if at least this percent of the bytes in the mapping is
179 * obsolete. With the default of 25%, the amount of space mapped
180 * will be reduced to 1% of its original size after at most 16
181 * condenses. Higher values will condense less often (causing less
182 * i/o); lower values will reduce the mapping size more quickly.
183 */
184 static int zfs_condense_indirect_obsolete_pct = 25;
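/*
 * Worked example of the claim above: each condense removes at least
 * 25% of the mapped bytes, so after N condenses at most 0.75^N of the
 * original mapping remains; 0.75^16 ~= 0.010, i.e. roughly 1%.
 */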
185
186 /*
187 * Condense if the obsolete space map takes up more than this amount of
188 * space on disk (logically). This limits the amount of disk space
189 * consumed by the obsolete space map; the default of 1GB is small enough
190 * that we typically don't mind "wasting" it.
191 */
192 static unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
193
194 /*
195 * Don't bother condensing if the mapping uses less than this amount of
196 * memory. The default of 128KB is considered a "trivial" amount of
197 * memory and not worth reducing.
198 */
199 static unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;
200
201 /*
202 * This is used by the test suite so that it can ensure that certain
203 * actions happen while in the middle of a condense (which might otherwise
204 * complete too quickly). If used to reduce the performance impact of
205 * condensing in production, a maximum value of 1 should be sufficient.
206 */
207 static int zfs_condense_indirect_commit_entry_delay_ms = 0;
208
209 /*
210 * If an indirect split block contains more than this many possible unique
211 * combinations when being reconstructed, consider it too computationally
212 * expensive to check them all. Instead, try at most 100 randomly-selected
213 * combinations each time the block is accessed. This allows all segment
214 * copies to participate fairly in the reconstruction when all combinations
215 * cannot be checked and prevents repeated use of one bad copy.
216 */
217 int zfs_reconstruct_indirect_combinations_max = 4096;
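/*
 * Worked example (illustrative): a block split into 12 segments, each
 * with two unique copies (e.g. on 2-way mirrors), has 2^12 = 4096
 * unique combinations -- not more than the default limit, so all are
 * checked deterministically. One more such segment (2^13 = 8192) would
 * exceed the limit and trigger the random sampling described above.
 */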
218
219 /*
220 * Enable to simulate damaged segments and validate reconstruction. This
221 * is intentionally not exposed as a module parameter.
222 */
223 unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
224
225 /*
226 * The indirect_child_t represents the vdev that we will read from, when we
227 * need to read all copies of the data (e.g. for scrub or reconstruction).
228 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
229 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
230 * ic_vdev is a child of the mirror.
231 */
232 typedef struct indirect_child {
233 abd_t *ic_data;
234 vdev_t *ic_vdev;
235
236 /*
237 * ic_duplicate is NULL when the ic_data contents are unique; when the
238 * data is determined to be a duplicate, it references the primary child.
239 */
240 struct indirect_child *ic_duplicate;
241 list_node_t ic_node; /* node on is_unique_child */
242 int ic_error; /* set when a child does not contain the data */
243 } indirect_child_t;
244
245 /*
246 * The indirect_split_t represents one mapped segment of an i/o to the
247 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
248 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
249 * For split blocks, there will be several of these.
250 */
251 typedef struct indirect_split {
252 list_node_t is_node; /* link on iv_splits */
253
254 /*
255 * is_split_offset is the offset into the i/o.
256 * This is the sum of the previous splits' is_size's.
257 */
258 uint64_t is_split_offset;
259
260 vdev_t *is_vdev; /* top-level vdev */
261 uint64_t is_target_offset; /* offset on is_vdev */
262 uint64_t is_size;
263 int is_children; /* number of entries in is_child[] */
264 int is_unique_children; /* number of entries in is_unique_child */
265 list_t is_unique_child;
266
267 /*
268 * is_good_child is the child that we are currently using to
269 * attempt reconstruction.
270 */
271 indirect_child_t *is_good_child;
272
273 indirect_child_t is_child[1]; /* variable-length */
274 } indirect_split_t;
275
276 /*
277 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
278 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
279 */
280 typedef struct indirect_vsd {
281 boolean_t iv_split_block;
282 boolean_t iv_reconstruct;
283 uint64_t iv_unique_combinations;
284 uint64_t iv_attempts;
285 uint64_t iv_attempts_max;
286
287 list_t iv_splits; /* list of indirect_split_t's */
288 } indirect_vsd_t;
289
290 static void
291 vdev_indirect_map_free(zio_t *zio)
292 {
293 indirect_vsd_t *iv = zio->io_vsd;
294
295 indirect_split_t *is;
296 while ((is = list_head(&iv->iv_splits)) != NULL) {
297 for (int c = 0; c < is->is_children; c++) {
298 indirect_child_t *ic = &is->is_child[c];
299 if (ic->ic_data != NULL)
300 abd_free(ic->ic_data);
301 }
302 list_remove(&iv->iv_splits, is);
303
304 indirect_child_t *ic;
305 while ((ic = list_head(&is->is_unique_child)) != NULL)
306 list_remove(&is->is_unique_child, ic);
307
308 list_destroy(&is->is_unique_child);
309
310 kmem_free(is,
311 offsetof(indirect_split_t, is_child[is->is_children]));
312 }
313 kmem_free(iv, sizeof (*iv));
314 }
315
316 static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
317 .vsd_free = vdev_indirect_map_free,
318 };
319
320 /*
321 * Mark the given offset and size as being obsolete.
322 */
323 void
324 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
325 {
326 spa_t *spa = vd->vdev_spa;
327
328 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
329 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
330 ASSERT(size > 0);
331 VERIFY(vdev_indirect_mapping_entry_for_offset(
332 vd->vdev_indirect_mapping, offset) != NULL);
333
334 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
335 mutex_enter(&vd->vdev_obsolete_lock);
336 range_tree_add(vd->vdev_obsolete_segments, offset, size);
337 mutex_exit(&vd->vdev_obsolete_lock);
338 vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
339 }
340 }
341
342 /*
343 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
344 * wrapper is provided because the DMU does not know about vdev_t's and
345 * cannot directly call vdev_indirect_mark_obsolete.
346 */
347 void
348 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
349 uint64_t size, dmu_tx_t *tx)
350 {
351 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
352 ASSERT(dmu_tx_is_syncing(tx));
353
354 /* The DMU can only remap indirect vdevs. */
355 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
356 vdev_indirect_mark_obsolete(vd, offset, size);
357 }
358
359 static spa_condensing_indirect_t *
360 spa_condensing_indirect_create(spa_t *spa)
361 {
362 spa_condensing_indirect_phys_t *scip =
363 &spa->spa_condensing_indirect_phys;
364 spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
365 objset_t *mos = spa->spa_meta_objset;
366
367 for (int i = 0; i < TXG_SIZE; i++) {
368 list_create(&sci->sci_new_mapping_entries[i],
369 sizeof (vdev_indirect_mapping_entry_t),
370 offsetof(vdev_indirect_mapping_entry_t, vime_node));
371 }
372
373 sci->sci_new_mapping =
374 vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
375
376 return (sci);
377 }
378
379 static void
380 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
381 {
382 for (int i = 0; i < TXG_SIZE; i++)
383 list_destroy(&sci->sci_new_mapping_entries[i]);
384
385 if (sci->sci_new_mapping != NULL)
386 vdev_indirect_mapping_close(sci->sci_new_mapping);
387
388 kmem_free(sci, sizeof (*sci));
389 }
390
391 boolean_t
392 vdev_indirect_should_condense(vdev_t *vd)
393 {
394 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
395 spa_t *spa = vd->vdev_spa;
396
397 ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
398
399 if (!zfs_condense_indirect_vdevs_enable)
400 return (B_FALSE);
401
402 /*
403 * We can only condense one indirect vdev at a time.
404 */
405 if (spa->spa_condensing_indirect != NULL)
406 return (B_FALSE);
407
408 if (spa_shutting_down(spa))
409 return (B_FALSE);
410
411 /*
412 * The mapping object size must not change while we are
413 * condensing, so we can only condense indirect vdevs
414 * (not vdevs that are still in the middle of being removed).
415 */
416 if (vd->vdev_ops != &vdev_indirect_ops)
417 return (B_FALSE);
418
419 /*
420 * If nothing new has been marked obsolete, there is no
421 * point in condensing.
422 */
423 uint64_t obsolete_sm_obj __maybe_unused;
424 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
425 if (vd->vdev_obsolete_sm == NULL) {
426 ASSERT0(obsolete_sm_obj);
427 return (B_FALSE);
428 }
429
430 ASSERT(vd->vdev_obsolete_sm != NULL);
431
432 ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
433
434 uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
435 uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
436 uint64_t mapping_size = vdev_indirect_mapping_size(vim);
437 uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
438
439 ASSERT3U(bytes_obsolete, <=, bytes_mapped);
440
441 /*
442 * If a high percentage of the bytes that are mapped have become
443 * obsolete, condense (unless the mapping is already small enough).
444 * This has a good chance of reducing the amount of memory used
445 * by the mapping.
446 */
447 if (bytes_obsolete * 100 / bytes_mapped >=
448 zfs_condense_indirect_obsolete_pct &&
449 mapping_size > zfs_condense_min_mapping_bytes) {
450 zfs_dbgmsg("should condense vdev %llu because obsolete "
451 "spacemap covers %d%% of %lluMB mapping",
452 (u_longlong_t)vd->vdev_id,
453 (int)(bytes_obsolete * 100 / bytes_mapped),
454 (u_longlong_t)bytes_mapped / 1024 / 1024);
455 return (B_TRUE);
456 }
457
458 /*
459 * If the obsolete space map takes up too much space on disk,
460 * condense in order to free up this disk space.
461 */
462 if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
463 zfs_dbgmsg("should condense vdev %llu because obsolete sm "
464 "length %lluMB >= max size %lluMB",
465 (u_longlong_t)vd->vdev_id,
466 (u_longlong_t)obsolete_sm_size / 1024 / 1024,
467 (u_longlong_t)zfs_condense_max_obsolete_bytes /
468 1024 / 1024);
469 return (B_TRUE);
470 }
471
472 return (B_FALSE);
473 }
474
475 /*
476 * This sync task completes (finishes) a condense, deleting the old
477 * mapping and replacing it with the new one.
478 */
479 static void
480 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
481 {
482 spa_condensing_indirect_t *sci = arg;
483 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
484 spa_condensing_indirect_phys_t *scip =
485 &spa->spa_condensing_indirect_phys;
486 vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
487 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
488 objset_t *mos = spa->spa_meta_objset;
489 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
490 uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
491 uint64_t new_count =
492 vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
493
494 ASSERT(dmu_tx_is_syncing(tx));
495 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
496 ASSERT3P(sci, ==, spa->spa_condensing_indirect);
497 for (int i = 0; i < TXG_SIZE; i++) {
498 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
499 }
500 ASSERT(vic->vic_mapping_object != 0);
501 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
502 ASSERT(scip->scip_next_mapping_object != 0);
503 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
504
505 /*
506 * Reset vdev_indirect_mapping to refer to the new object.
507 */
508 rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
509 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
510 vd->vdev_indirect_mapping = sci->sci_new_mapping;
511 rw_exit(&vd->vdev_indirect_rwlock);
512
513 sci->sci_new_mapping = NULL;
514 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
515 vic->vic_mapping_object = scip->scip_next_mapping_object;
516 scip->scip_next_mapping_object = 0;
517
518 space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
519 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
520 scip->scip_prev_obsolete_sm_object = 0;
521
522 scip->scip_vdev = 0;
523
524 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
525 DMU_POOL_CONDENSING_INDIRECT, tx));
526 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
527 spa->spa_condensing_indirect = NULL;
528
529 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
530 "new mapping object %llu has %llu entries "
531 "(was %llu entries)",
532 (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
533 (u_longlong_t)vic->vic_mapping_object,
534 (u_longlong_t)new_count, (u_longlong_t)old_count);
535
536 vdev_config_dirty(spa->spa_root_vdev);
537 }
538
539 /*
540 * This sync task appends entries to the new mapping object.
541 */
542 static void
543 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
544 {
545 spa_condensing_indirect_t *sci = arg;
546 uint64_t txg = dmu_tx_get_txg(tx);
547 spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
548
549 ASSERT(dmu_tx_is_syncing(tx));
550 ASSERT3P(sci, ==, spa->spa_condensing_indirect);
551
552 vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
553 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
554 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
555 }
556
557 /*
558 * Open-context function to add one entry to the new mapping. The new
559 * entry will be remembered and written from syncing context.
560 */
561 static void
562 spa_condense_indirect_commit_entry(spa_t *spa,
563 vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
564 {
565 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
566
567 ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
568
569 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
570 dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
571 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
572 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
573
574 /*
575 * If we are the first entry committed this txg, kick off the sync
576 * task to write to the MOS on our behalf.
577 */
578 if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
579 dsl_sync_task_nowait(dmu_tx_pool(tx),
580 spa_condense_indirect_commit_sync, sci, tx);
581 }
582
583 vdev_indirect_mapping_entry_t *vime =
584 kmem_alloc(sizeof (*vime), KM_SLEEP);
585 vime->vime_mapping = *vimep;
586 vime->vime_obsolete_count = count;
587 list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
588
589 dmu_tx_commit(tx);
590 }
591
592 static void
593 spa_condense_indirect_generate_new_mapping(vdev_t *vd,
594 uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
595 {
596 spa_t *spa = vd->vdev_spa;
597 uint64_t mapi = start_index;
598 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
599 uint64_t old_num_entries =
600 vdev_indirect_mapping_num_entries(old_mapping);
601
602 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
603 ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
604
605 zfs_dbgmsg("starting condense of vdev %llu from index %llu",
606 (u_longlong_t)vd->vdev_id,
607 (u_longlong_t)mapi);
608
609 while (mapi < old_num_entries) {
610
611 if (zthr_iscancelled(zthr)) {
612 zfs_dbgmsg("pausing condense of vdev %llu "
613 "at index %llu", (u_longlong_t)vd->vdev_id,
614 (u_longlong_t)mapi);
615 break;
616 }
617
618 vdev_indirect_mapping_entry_phys_t *entry =
619 &old_mapping->vim_entries[mapi];
620 uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
621 ASSERT3U(obsolete_counts[mapi], <=, entry_size);
622 if (obsolete_counts[mapi] < entry_size) {
623 spa_condense_indirect_commit_entry(spa, entry,
624 obsolete_counts[mapi]);
625
626 /*
627 * This delay may be requested for testing, debugging,
628 * or performance reasons.
629 */
630 hrtime_t now = gethrtime();
631 hrtime_t sleep_until = now + MSEC2NSEC(
632 zfs_condense_indirect_commit_entry_delay_ms);
633 zfs_sleep_until(sleep_until);
634 }
635
636 mapi++;
637 }
638 }
639
640 static boolean_t
641 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
642 {
643 (void) zthr;
644 spa_t *spa = arg;
645
646 return (spa->spa_condensing_indirect != NULL);
647 }
648
649 static void
650 spa_condense_indirect_thread(void *arg, zthr_t *zthr)
651 {
652 spa_t *spa = arg;
653 vdev_t *vd;
654
655 ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
656 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
657 vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
658 ASSERT3P(vd, !=, NULL);
659 spa_config_exit(spa, SCL_VDEV, FTAG);
660
661 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
662 spa_condensing_indirect_phys_t *scip =
663 &spa->spa_condensing_indirect_phys;
664 uint32_t *counts;
665 uint64_t start_index;
666 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
667 space_map_t *prev_obsolete_sm = NULL;
668
669 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
670 ASSERT(scip->scip_next_mapping_object != 0);
671 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
672 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
673
674 for (int i = 0; i < TXG_SIZE; i++) {
675 /*
676 * The list must start out empty in order for the
677 * _commit_sync() sync task to be properly registered
678 * on the first call to _commit_entry(); so it's wise
679 * to double check and ensure we actually are starting
680 * with empty lists.
681 */
682 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
683 }
684
685 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
686 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
687 counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
688 if (prev_obsolete_sm != NULL) {
689 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
690 counts, prev_obsolete_sm);
691 }
692 space_map_close(prev_obsolete_sm);
693
694 /*
695 * Generate new mapping. Determine what index to continue from
696 * based on the max offset that we've already written in the
697 * new mapping.
698 */
699 uint64_t max_offset =
700 vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
701 if (max_offset == 0) {
702 /* We haven't written anything to the new mapping yet. */
703 start_index = 0;
704 } else {
705 /*
706 * Pick up from where we left off.
707 * _entry_for_offset_or_next() returns a pointer into
708 * the vim_entries array. If max_offset is greater
709 * than any of the mappings contained in the table,
710 * NULL is returned, indicating that we've exhausted
711 * our iteration of the old_mapping.
712 */
713
714 vdev_indirect_mapping_entry_phys_t *entry =
715 vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
716 max_offset);
717
718 if (entry == NULL) {
719 /*
720 * We've already written the whole new mapping.
721 * This special value will cause us to skip the
722 * generate_new_mapping step and just do the sync
723 * task to complete the condense.
724 */
725 start_index = UINT64_MAX;
726 } else {
727 start_index = entry - old_mapping->vim_entries;
728 ASSERT3U(start_index, <,
729 vdev_indirect_mapping_num_entries(old_mapping));
730 }
731 }
732
733 spa_condense_indirect_generate_new_mapping(vd, counts,
734 start_index, zthr);
735
736 vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
737
738 /*
739 * If the zthr has received a cancellation signal while running
740 * in generate_new_mapping() or at any point after that, then bail
741 * early. We don't want to complete the condense if the spa is
742 * shutting down.
743 */
744 if (zthr_iscancelled(zthr))
745 return;
746
747 VERIFY0(dsl_sync_task(spa_name(spa), NULL,
748 spa_condense_indirect_complete_sync, sci, 0,
749 ZFS_SPACE_CHECK_EXTRA_RESERVED));
750 }
751
752 /*
753 * Sync task to begin the condensing process.
754 */
755 void
756 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
757 {
758 spa_t *spa = vd->vdev_spa;
759 spa_condensing_indirect_phys_t *scip =
760 &spa->spa_condensing_indirect_phys;
761
762 ASSERT0(scip->scip_next_mapping_object);
763 ASSERT0(scip->scip_prev_obsolete_sm_object);
764 ASSERT0(scip->scip_vdev);
765 ASSERT(dmu_tx_is_syncing(tx));
766 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
767 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
768 ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
769
770 uint64_t obsolete_sm_obj;
771 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
772 ASSERT3U(obsolete_sm_obj, !=, 0);
773
774 scip->scip_vdev = vd->vdev_id;
775 scip->scip_next_mapping_object =
776 vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
777
778 scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
779
780 /*
781 * We don't need to allocate a new space map object, since
782 * vdev_indirect_sync_obsolete will allocate one when needed.
783 */
784 space_map_close(vd->vdev_obsolete_sm);
785 vd->vdev_obsolete_sm = NULL;
786 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
787 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
788
789 VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
790 DMU_POOL_DIRECTORY_OBJECT,
791 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
792 sizeof (*scip) / sizeof (uint64_t), scip, tx));
793
794 ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
795 spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
796
797 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
798 "posm=%llu nm=%llu",
799 (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
800 (u_longlong_t)scip->scip_prev_obsolete_sm_object,
801 (u_longlong_t)scip->scip_next_mapping_object);
802
803 zthr_wakeup(spa->spa_condense_zthr);
804 }
805
806 /*
807 * Sync to the given vdev's obsolete space map any segments that are no longer
808 * referenced as of the given txg.
809 *
810 * If the obsolete space map doesn't exist yet, create and open it.
811 */
812 void
813 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
814 {
815 spa_t *spa = vd->vdev_spa;
816 vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
817
818 ASSERT3U(vic->vic_mapping_object, !=, 0);
819 ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
820 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
821 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
822
823 uint64_t obsolete_sm_object;
824 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
825 if (obsolete_sm_object == 0) {
826 obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
827 zfs_vdev_standard_sm_blksz, tx);
828
829 ASSERT(vd->vdev_top_zap != 0);
830 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
831 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
832 sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
833 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
834 ASSERT3U(obsolete_sm_object, !=, 0);
835
836 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
837 VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
838 spa->spa_meta_objset, obsolete_sm_object,
839 0, vd->vdev_asize, 0));
840 }
841
842 ASSERT(vd->vdev_obsolete_sm != NULL);
843 ASSERT3U(obsolete_sm_object, ==,
844 space_map_object(vd->vdev_obsolete_sm));
845
846 space_map_write(vd->vdev_obsolete_sm,
847 vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
848 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
849 }
850
851 int
852 spa_condense_init(spa_t *spa)
853 {
854 int error = zap_lookup(spa->spa_meta_objset,
855 DMU_POOL_DIRECTORY_OBJECT,
856 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
857 sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
858 &spa->spa_condensing_indirect_phys);
859 if (error == 0) {
860 if (spa_writeable(spa)) {
861 spa->spa_condensing_indirect =
862 spa_condensing_indirect_create(spa);
863 }
864 return (0);
865 } else if (error == ENOENT) {
866 return (0);
867 } else {
868 return (error);
869 }
870 }
871
872 void
873 spa_condense_fini(spa_t *spa)
874 {
875 if (spa->spa_condensing_indirect != NULL) {
876 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
877 spa->spa_condensing_indirect = NULL;
878 }
879 }
880
881 void
882 spa_start_indirect_condensing_thread(spa_t *spa)
883 {
884 ASSERT3P(spa->spa_condense_zthr, ==, NULL);
885 spa->spa_condense_zthr = zthr_create("z_indirect_condense",
886 spa_condense_indirect_thread_check,
887 spa_condense_indirect_thread, spa, minclsyspri);
888 }
889
890 /*
891 * Gets the obsolete spacemap object from the vdev's ZAP. On success, sm_obj
892 * will contain either the obsolete spacemap object or zero if none exists
893 * (ENOENT is treated as none). All other errors are returned to the caller.
894 */
895 int
896 vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
897 {
898 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
899
900 if (vd->vdev_top_zap == 0) {
901 *sm_obj = 0;
902 return (0);
903 }
904
905 int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
906 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
907 if (error == ENOENT) {
908 *sm_obj = 0;
909 error = 0;
910 }
911
912 return (error);
913 }
914
915 /*
916 * Gets whether the obsolete counts are precise from the vdev's ZAP.
917 * On success are_precise will be set to reflect if the counts are precise.
918 * All other errors are returned to the caller.
919 */
920 int
921 vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
922 {
923 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
924
925 if (vd->vdev_top_zap == 0) {
926 *are_precise = B_FALSE;
927 return (0);
928 }
929
930 uint64_t val = 0;
931 int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
932 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
933 if (error == 0) {
934 *are_precise = (val != 0);
935 } else if (error == ENOENT) {
936 *are_precise = B_FALSE;
937 error = 0;
938 }
939
940 return (error);
941 }
942
943 static void
944 vdev_indirect_close(vdev_t *vd)
945 {
946 (void) vd;
947 }
948
949 static int
950 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
951 uint64_t *logical_ashift, uint64_t *physical_ashift)
952 {
953 *psize = *max_psize = vd->vdev_asize +
954 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
955 *logical_ashift = vd->vdev_ashift;
956 *physical_ashift = vd->vdev_physical_ashift;
957 return (0);
958 }
959
960 typedef struct remap_segment {
961 vdev_t *rs_vd;
962 uint64_t rs_offset;
963 uint64_t rs_asize;
964 uint64_t rs_split_offset;
965 list_node_t rs_node;
966 } remap_segment_t;
967
968 static remap_segment_t *
969 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
970 {
971 remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
972 rs->rs_vd = vd;
973 rs->rs_offset = offset;
974 rs->rs_asize = asize;
975 rs->rs_split_offset = split_offset;
976 return (rs);
977 }
978
979 /*
980 * Given an indirect vdev and an extent on that vdev, this function copies
981 * the physical entries of the indirect mapping that correspond to the extent
982 * into a new array and returns a pointer to it. In addition, copied_entries
983 * is populated with the number of mapping entries that were duplicated.
984 *
985 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
986 * This ensures that the mapping won't change due to condensing as we
987 * copy over its contents.
988 *
989 * Finally, since we are doing an allocation, it is up to the caller to
990 * free the array allocated in this function.
991 */
992 static vdev_indirect_mapping_entry_phys_t *
993 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
994 uint64_t asize, uint64_t *copied_entries)
995 {
996 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
997 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
998 uint64_t entries = 0;
999
1000 ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
1001
1002 vdev_indirect_mapping_entry_phys_t *first_mapping =
1003 vdev_indirect_mapping_entry_for_offset(vim, offset);
1004 ASSERT3P(first_mapping, !=, NULL);
1005
1006 vdev_indirect_mapping_entry_phys_t *m = first_mapping;
1007 while (asize > 0) {
1008 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1009
1010 ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
1011 ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1012
1013 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
1014 uint64_t inner_size = MIN(asize, size - inner_offset);
1015
1016 offset += inner_size;
1017 asize -= inner_size;
1018 entries++;
1019 m++;
1020 }
1021
1022 size_t copy_length = entries * sizeof (*first_mapping);
1023 duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
1024 bcopy(first_mapping, duplicate_mappings, copy_length);
1025 *copied_entries = entries;
1026
1027 return (duplicate_mappings);
1028 }
1029
1030 /*
1031 * Goes through the relevant indirect mappings until it hits a concrete vdev
1032 * and issues the callback. On the way to the concrete vdev, if any other
1033 * indirect vdevs are encountered, then the callback will also be called on
1034 * each of those indirect vdevs. For example, if the segment is mapped to
1035 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
1036 * mapped to segment B on concrete vdev 2, then the callback will be called on
1037 * both vdev 1 and vdev 2.
1038 *
1039 * While the callback passed to vdev_indirect_remap() is called on every vdev
1040 * the function encounters, certain callbacks only care about concrete vdevs.
1041 * These types of callbacks should return immediately and explicitly when they
1042 * are called on an indirect vdev.
1043 *
1044 * Because there is a possibility that a DVA section in the indirect device
1045 * has been split into multiple sections in our mapping, we keep track
1046 * of the relevant contiguous segments of the new location (remap_segment_t)
1047 * in a stack. This way we can call the callback for each of the new sections
1048 * created by a single section of the indirect device. Note, though, that in
1049 * this scenario the callbacks in each split block won't occur in-order in
1050 * terms of offset, so callers should not make any assumptions about that.
1051 *
1052 * For callbacks that don't handle split blocks and immediately return when
1053 * they encounter them (as is the case for remap_blkptr_cb), the caller can
1054 * assume that its callback will be applied from the first indirect vdev
1055 * encountered to the last one and then the concrete vdev, in that order.
1056 */
1057 static void
1058 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1059 void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1060 {
1061 list_t stack;
1062 spa_t *spa = vd->vdev_spa;
1063
1064 list_create(&stack, sizeof (remap_segment_t),
1065 offsetof(remap_segment_t, rs_node));
1066
1067 for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1068 rs != NULL; rs = list_remove_head(&stack)) {
1069 vdev_t *v = rs->rs_vd;
1070 uint64_t num_entries = 0;
1071
1072 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1073 ASSERT(rs->rs_asize > 0);
1074
1075 /*
1076 * Note: As this function can be called from open context
1077 * (e.g. zio_read()), we need the following rwlock to
1078 * prevent the mapping from being changed by condensing.
1079 *
1080 * So we grab the lock and we make a copy of the entries
1081 * that are relevant to the extent that we are working on.
1082 * Once that is done, we drop the lock and iterate over
1083 * our copy of the mapping. Once we are done with the
1084 * remap segment and have freed it, we also free our copy
1085 * of the indirect mapping entries that are relevant to it.
1086 *
1087 * This way we don't need to wait until the function is
1088 * finished with a segment before condensing it. In addition, we
1089 * don't need a recursive rwlock for the case that a call to
1090 * vdev_indirect_remap() needs to call itself (through the
1091 * codepath of its callback) for the same vdev in the middle
1092 * of its execution.
1093 */
1094 rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1095 ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
1096
1097 vdev_indirect_mapping_entry_phys_t *mapping =
1098 vdev_indirect_mapping_duplicate_adjacent_entries(v,
1099 rs->rs_offset, rs->rs_asize, &num_entries);
1100 ASSERT3P(mapping, !=, NULL);
1101 ASSERT3U(num_entries, >, 0);
1102 rw_exit(&v->vdev_indirect_rwlock);
1103
1104 for (uint64_t i = 0; i < num_entries; i++) {
1105 /*
1106 * Note: the vdev_indirect_mapping can not change
1107 * while we are running. It only changes while the
1108 * removal is in progress, and then only from syncing
1109 * context. While a removal is in progress, this
1110 * function is only called for frees, which also only
1111 * happen from syncing context.
1112 */
1113 vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1114
1115 ASSERT3P(m, !=, NULL);
1116 ASSERT3U(rs->rs_asize, >, 0);
1117
1118 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1119 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1120 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1121
1122 ASSERT3U(rs->rs_offset, >=,
1123 DVA_MAPPING_GET_SRC_OFFSET(m));
1124 ASSERT3U(rs->rs_offset, <,
1125 DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1126 ASSERT3U(dst_vdev, !=, v->vdev_id);
1127
1128 uint64_t inner_offset = rs->rs_offset -
1129 DVA_MAPPING_GET_SRC_OFFSET(m);
1130 uint64_t inner_size =
1131 MIN(rs->rs_asize, size - inner_offset);
1132
1133 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1134 ASSERT3P(dst_v, !=, NULL);
1135
1136 if (dst_v->vdev_ops == &vdev_indirect_ops) {
1137 list_insert_head(&stack,
1138 rs_alloc(dst_v, dst_offset + inner_offset,
1139 inner_size, rs->rs_split_offset));
1141 }
1142
1143 if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1144 IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1145 /*
1146 * Note: This clause exists solely for
1147 * testing purposes. We use it to ensure that
1148 * split blocks work and that the callbacks
1149 * using them yield the same result if issued
1150 * in reverse order.
1151 */
1152 uint64_t inner_half = inner_size / 2;
1153
1154 func(rs->rs_split_offset + inner_half, dst_v,
1155 dst_offset + inner_offset + inner_half,
1156 inner_half, arg);
1157
1158 func(rs->rs_split_offset, dst_v,
1159 dst_offset + inner_offset,
1160 inner_half, arg);
1161 } else {
1162 func(rs->rs_split_offset, dst_v,
1163 dst_offset + inner_offset,
1164 inner_size, arg);
1165 }
1166
1167 rs->rs_offset += inner_size;
1168 rs->rs_asize -= inner_size;
1169 rs->rs_split_offset += inner_size;
1170 }
1171 VERIFY0(rs->rs_asize);
1172
1173 kmem_free(mapping, num_entries * sizeof (*mapping));
1174 kmem_free(rs, sizeof (remap_segment_t));
1175 }
1176 list_destroy(&stack);
1177 }
1178
1179 static void
1180 vdev_indirect_child_io_done(zio_t *zio)
1181 {
1182 zio_t *pio = zio->io_private;
1183
1184 mutex_enter(&pio->io_lock);
1185 pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1186 mutex_exit(&pio->io_lock);
1187
1188 abd_free(zio->io_abd);
1189 }
1190
1191 /*
1192 * This is a callback for vdev_indirect_remap() which allocates an
1193 * indirect_split_t for each split segment and adds it to iv_splits.
1194 */
1195 static void
1196 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1197 uint64_t size, void *arg)
1198 {
1199 zio_t *zio = arg;
1200 indirect_vsd_t *iv = zio->io_vsd;
1201
1202 ASSERT3P(vd, !=, NULL);
1203
1204 if (vd->vdev_ops == &vdev_indirect_ops)
1205 return;
1206
1207 int n = 1;
1208 if (vd->vdev_ops == &vdev_mirror_ops)
1209 n = vd->vdev_children;
1210
1211 indirect_split_t *is =
1212 kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1213
1214 is->is_children = n;
1215 is->is_size = size;
1216 is->is_split_offset = split_offset;
1217 is->is_target_offset = offset;
1218 is->is_vdev = vd;
1219 list_create(&is->is_unique_child, sizeof (indirect_child_t),
1220 offsetof(indirect_child_t, ic_node));
1221
1222 /*
1223 * Note that we only consider multiple copies of the data for
1224 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
1225 * though they use the same ops as mirror, because there's only one
1226 * "good" copy under the replacing/spare.
1227 */
1228 if (vd->vdev_ops == &vdev_mirror_ops) {
1229 for (int i = 0; i < n; i++) {
1230 is->is_child[i].ic_vdev = vd->vdev_child[i];
1231 list_link_init(&is->is_child[i].ic_node);
1232 }
1233 } else {
1234 is->is_child[0].ic_vdev = vd;
1235 }
1236
1237 list_insert_tail(&iv->iv_splits, is);
1238 }
1239
1240 static void
1241 vdev_indirect_read_split_done(zio_t *zio)
1242 {
1243 indirect_child_t *ic = zio->io_private;
1244
1245 if (zio->io_error != 0) {
1246 /*
1247 * Clear ic_data to indicate that we do not have data for this
1248 * child.
1249 */
1250 abd_free(ic->ic_data);
1251 ic->ic_data = NULL;
1252 }
1253 }
1254
1255 /*
1256 * Issue reads for all copies (mirror children) of all splits.
1257 */
1258 static void
1259 vdev_indirect_read_all(zio_t *zio)
1260 {
1261 indirect_vsd_t *iv = zio->io_vsd;
1262
1263 ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
1264
1265 for (indirect_split_t *is = list_head(&iv->iv_splits);
1266 is != NULL; is = list_next(&iv->iv_splits, is)) {
1267 for (int i = 0; i < is->is_children; i++) {
1268 indirect_child_t *ic = &is->is_child[i];
1269
1270 if (!vdev_readable(ic->ic_vdev))
1271 continue;
1272
1273 /*
1274 * If a child is missing the data, set ic_error. Used
1275 * in vdev_indirect_repair(). We perform the read
1276 * nevertheless, which provides the opportunity to
1277 * reconstruct the split block if at all possible.
1278 */
1279 if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
1280 zio->io_txg, 1))
1281 ic->ic_error = SET_ERROR(ESTALE);
1282
1283 ic->ic_data = abd_alloc_sametype(zio->io_abd,
1284 is->is_size);
1285 ic->ic_duplicate = NULL;
1286
1287 zio_nowait(zio_vdev_child_io(zio, NULL,
1288 ic->ic_vdev, is->is_target_offset, ic->ic_data,
1289 is->is_size, zio->io_type, zio->io_priority, 0,
1290 vdev_indirect_read_split_done, ic));
1291 }
1292 }
1293 iv->iv_reconstruct = B_TRUE;
1294 }
1295
1296 static void
1297 vdev_indirect_io_start(zio_t *zio)
1298 {
1299 spa_t *spa __maybe_unused = zio->io_spa;
1300 indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1301 list_create(&iv->iv_splits,
1302 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1303
1304 zio->io_vsd = iv;
1305 zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1306
1307 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1308 if (zio->io_type != ZIO_TYPE_READ) {
1309 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1310 /*
1311 * Note: this code can handle other kinds of writes,
1312 * but we don't expect them.
1313 */
1314 ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1315 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1316 }
1317
1318 vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1319 vdev_indirect_gather_splits, zio);
1320
1321 indirect_split_t *first = list_head(&iv->iv_splits);
1322 if (first->is_size == zio->io_size) {
1323 /*
1324 * This is not a split block; we are pointing to the entire
1325 * data, which will checksum the same as the original data.
1326 * Pass the BP down so that the child i/o can verify the
1327 * checksum, and try a different location if available
1328 * (e.g. on a mirror).
1329 *
1330 * While this special case could be handled the same as the
1331 * general (split block) case, doing it this way ensures
1332 * that the vast majority of blocks on indirect vdevs
1333 * (which are not split) are handled identically to blocks
1334 * on non-indirect vdevs. This allows us to be less strict
1335 * about performance in the general (but rare) case.
1336 */
1337 ASSERT0(first->is_split_offset);
1338 ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1339 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1340 first->is_vdev, first->is_target_offset,
1341 abd_get_offset(zio->io_abd, 0),
1342 zio->io_size, zio->io_type, zio->io_priority, 0,
1343 vdev_indirect_child_io_done, zio));
1344 } else {
1345 iv->iv_split_block = B_TRUE;
1346 if (zio->io_type == ZIO_TYPE_READ &&
1347 zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1348 /*
1349 * Read all copies. Note that for simplicity,
1350 * we don't bother consulting the DTL in the
1351 * resilver case.
1352 */
1353 vdev_indirect_read_all(zio);
1354 } else {
1355 /*
1356 * If this is a read zio, we read one copy of each
1357 * split segment, from the top-level vdev. Since
1358 * we don't know the checksum of each split
1359 * individually, the child zio can't ensure that
1360 * we get the right data. E.g. if it's a mirror,
1361 * it will just read from a random (healthy) leaf
1362 * vdev. We have to verify the checksum in
1363 * vdev_indirect_io_done().
1364 *
1365 * For write zios, the vdev code will ensure we write
1366 * to all children.
1367 */
1368 for (indirect_split_t *is = list_head(&iv->iv_splits);
1369 is != NULL; is = list_next(&iv->iv_splits, is)) {
1370 zio_nowait(zio_vdev_child_io(zio, NULL,
1371 is->is_vdev, is->is_target_offset,
1372 abd_get_offset(zio->io_abd,
1373 is->is_split_offset), is->is_size,
1374 zio->io_type, zio->io_priority, 0,
1375 vdev_indirect_child_io_done, zio));
1376 }
1377
1378 }
1379 }
1380
1381 zio_execute(zio);
1382 }
1383
1384 /*
1385 * Report a checksum error for a child.
1386 */
1387 static void
1388 vdev_indirect_checksum_error(zio_t *zio,
1389 indirect_split_t *is, indirect_child_t *ic)
1390 {
1391 vdev_t *vd = ic->ic_vdev;
1392
1393 if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1394 return;
1395
1396 mutex_enter(&vd->vdev_stat_lock);
1397 vd->vdev_stat.vs_checksum_errors++;
1398 mutex_exit(&vd->vdev_stat_lock);
1399
1400 zio_bad_cksum_t zbc = {{{ 0 }}};
1401 abd_t *bad_abd = ic->ic_data;
1402 abd_t *good_abd = is->is_good_child->ic_data;
1403 (void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
1404 is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
1405 }
1406
1407 /*
1408 * Issue repair i/os for any incorrect copies. We do this by comparing
1409 * each split segment's correct data (is_good_child's ic_data) with each
1410 * other copy of the data. If they differ, then we overwrite the bad data
1411 * with the good copy. The DTL is checked in vdev_indirect_read_all() and
1412 * if a vdev is missing a copy of the data we set ic_error and the read is
1413 * performed. This provides the opportunity to reconstruct the split block
1414 * if at all possible. ic_error is checked here and if set it suppresses
1415 * incrementing the checksum counter. Aside from this, DTLs are not checked,
1416 * which simplifies this code and also issues the optimal number of writes
1417 * (based on which copies actually read bad data, as opposed to which we
1418 * think might be wrong). For the same reason, we always use
1419 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1420 */
1421 static void
1422 vdev_indirect_repair(zio_t *zio)
1423 {
1424 indirect_vsd_t *iv = zio->io_vsd;
1425
1426 if (!spa_writeable(zio->io_spa))
1427 return;
1428
1429 for (indirect_split_t *is = list_head(&iv->iv_splits);
1430 is != NULL; is = list_next(&iv->iv_splits, is)) {
1431 for (int c = 0; c < is->is_children; c++) {
1432 indirect_child_t *ic = &is->is_child[c];
1433 if (ic == is->is_good_child)
1434 continue;
1435 if (ic->ic_data == NULL)
1436 continue;
1437 if (ic->ic_duplicate == is->is_good_child)
1438 continue;
1439
1440 zio_nowait(zio_vdev_child_io(zio, NULL,
1441 ic->ic_vdev, is->is_target_offset,
1442 is->is_good_child->ic_data, is->is_size,
1443 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1444 ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1445 NULL, NULL));
1446
1447 /*
1448 * If ic_error is set the current child does not have
1449 * a copy of the data, so suppress incrementing the
1450 * checksum counter.
1451 */
1452 if (ic->ic_error == ESTALE)
1453 continue;
1454
1455 vdev_indirect_checksum_error(zio, is, ic);
1456 }
1457 }
1458 }
1459
1460 /*
1461 * Report checksum errors on all children that we read from.
1462 */
1463 static void
1464 vdev_indirect_all_checksum_errors(zio_t *zio)
1465 {
1466 indirect_vsd_t *iv = zio->io_vsd;
1467
1468 if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1469 return;
1470
1471 for (indirect_split_t *is = list_head(&iv->iv_splits);
1472 is != NULL; is = list_next(&iv->iv_splits, is)) {
1473 for (int c = 0; c < is->is_children; c++) {
1474 indirect_child_t *ic = &is->is_child[c];
1475
1476 if (ic->ic_data == NULL)
1477 continue;
1478
1479 vdev_t *vd = ic->ic_vdev;
1480
1481 (void) zfs_ereport_post_checksum(zio->io_spa, vd,
1482 NULL, zio, is->is_target_offset, is->is_size,
1483 NULL, NULL, NULL);
1484 mutex_enter(&vd->vdev_stat_lock);
1485 vd->vdev_stat.vs_checksum_errors++;
1486 mutex_exit(&vd->vdev_stat_lock);
1487 }
1488 }
1489 }
1490
1491 /*
1492 * Copy data from all the splits to the main zio, then validate the
1493 * checksum. If the checksum is successfully validated, return success.
1494 */
1495 static int
1496 vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
1497 {
1498 zio_bad_cksum_t zbc;
1499
1500 for (indirect_split_t *is = list_head(&iv->iv_splits);
1501 is != NULL; is = list_next(&iv->iv_splits, is)) {
1502
1503 ASSERT3P(is->is_good_child->ic_data, !=, NULL);
1504 ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
1505
1506 abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
1507 is->is_split_offset, 0, is->is_size);
1508 }
1509
1510 return (zio_checksum_error(zio, &zbc));
1511 }
1512
1513 /*
1514 * There are relatively few possible combinations, making it feasible to
1515 * deterministically check them all. We do this by advancing good_child to
1516 * the next unique split version; on reaching the end of a list, we reset
1517 * it to the head and "carry over" into the next split (like mixed-radix
1518 * counting, where each digit's base is that split's is_unique_children).
1519 */
1520 static int
1521 vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
1522 {
1523 boolean_t more = B_TRUE;
1524
1525 iv->iv_attempts = 0;
1526
1527 for (indirect_split_t *is = list_head(&iv->iv_splits);
1528 is != NULL; is = list_next(&iv->iv_splits, is))
1529 is->is_good_child = list_head(&is->is_unique_child);
1530
1531 while (more == B_TRUE) {
1532 iv->iv_attempts++;
1533 more = B_FALSE;
1534
1535 if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1536 return (0);
1537
1538 for (indirect_split_t *is = list_head(&iv->iv_splits);
1539 is != NULL; is = list_next(&iv->iv_splits, is)) {
1540 is->is_good_child = list_next(&is->is_unique_child,
1541 is->is_good_child);
1542 if (is->is_good_child != NULL) {
1543 more = B_TRUE;
1544 break;
1545 }
1546
1547 is->is_good_child = list_head(&is->is_unique_child);
1548 }
1549 }
1550
1551 ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
1552
1553 return (SET_ERROR(ECKSUM));
1554 }
1555
1556 /*
1557 * There are too many combinations to try all of them in a reasonable amount
1558 * of time. So try a fixed number of random combinations from the unique
1559 * split versions, after which we'll consider the block unrecoverable.
1560 */
1561 static int
1562 vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
1563 {
1564 iv->iv_attempts = 0;
1565
1566 while (iv->iv_attempts < iv->iv_attempts_max) {
1567 iv->iv_attempts++;
1568
1569 for (indirect_split_t *is = list_head(&iv->iv_splits);
1570 is != NULL; is = list_next(&iv->iv_splits, is)) {
1571 indirect_child_t *ic = list_head(&is->is_unique_child);
1572 int children = is->is_unique_children;
1573
1574 for (int i = random_in_range(children); i > 0; i--)
1575 ic = list_next(&is->is_unique_child, ic);
1576
1577 ASSERT3P(ic, !=, NULL);
1578 is->is_good_child = ic;
1579 }
1580
1581 if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1582 return (0);
1583 }
1584
1585 return (SET_ERROR(ECKSUM));
1586 }
1587
1588 /*
1589 * This is a validation function for reconstruction. It randomly selects
1590 * a good combination, if one can be found, and then it intentionally
1591 * damages all other segment copies by zeroing them. This forces the
1592 * reconstruction algorithm to locate the one remaining known good copy.
1593 */
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
	int error;

	/* Presume all the copies are unique for initial selection. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];
			if (ic->ic_data != NULL) {
				is->is_unique_children++;
				list_insert_tail(&is->is_unique_child, ic);
			}
		}

		if (list_is_empty(&is->is_unique_child)) {
			error = SET_ERROR(EIO);
			goto out;
		}
	}

	/*
	 * Set each is_good_child to a randomly-selected child which
	 * is known to contain validated data.
	 */
	error = vdev_indirect_splits_enumerate_randomly(iv, zio);
	if (error)
		goto out;

	/*
	 * Damage all but the known good copy by zeroing it.  This will
	 * result in two or fewer unique copies per indirect_split_t:
	 * the good copy, and the zeroed copies, which all compare equal.
	 * Both may need to be checked in order to reconstruct the block.
	 * Set iv->iv_attempts_max such that all unique combinations will
	 * be enumerated, but limit the damage to at most 12 indirect
	 * splits.
	 */
	iv->iv_attempts_max = 1;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic == is->is_good_child)
				continue;
			if (ic->ic_data == NULL)
				continue;

			abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
		}

		iv->iv_attempts_max *= 2;
		if (iv->iv_attempts_max >= (1ULL << 12)) {
			iv->iv_attempts_max = UINT64_MAX;
			break;
		}
	}

out:
	/* Empty the unique children lists so they can be reconstructed. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		indirect_child_t *ic;
		while ((ic = list_head(&is->is_unique_child)) != NULL)
			list_remove(&is->is_unique_child, ic);

		is->is_unique_children = 0;
	}

	return (error);
}

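/*
 * A worked example of the bookkeeping above (an added note, not from the
 * original source): each damaged split retains at most two distinct
 * versions, the good copy and the zeroed copies (which all compare
 * equal), so n damaged splits yield at most 2^n unique combinations and
 * iv_attempts_max doubles accordingly.  Once it would reach
 * 1ULL << 12 = 4096, iv_attempts_max is raised to UINT64_MAX so that
 * every combination may be enumerated, and the loop breaks so that no
 * more than 12 splits are ever damaged.
 */
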
/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.
 *
 * If we pointed to any mirror vdevs, this effectively does the job of the
 * mirror.  The mirror vdev code can't do its own job because we don't know
 * the checksum of each split segment individually.
 *
 * We have to try every unique combination of copies of split segments, until
 * we find one that checksums correctly.  Duplicate segment copies are first
 * identified and later skipped during reconstruction.  This optimization
 * reduces the search space and ensures that of the remaining combinations
 * at most one is correct.
 *
 * When the total number of combinations is small they can all be checked.
 * For example, if we have 3 segments in the split, and each points to a
 * 2-way mirror with unique copies, we will have the following pieces of data:
 *
 *       |     mirror child
 * split |     [0]       [1]
 * ======|=====================
 *   A   |  data_A_0  data_A_1
 *   B   |  data_B_0  data_B_1
 *   C   |  data_C_0  data_C_1
 *
 * We will try the following (mirror children)^(number of splits) (2^3=8)
 * combinations, which is similar to bitwise-little-endian counting in
 * binary.  In general each "digit" corresponds to a split segment, and the
 * base of each digit is is_unique_children, which can be different for
 * each digit.
 *
1700 * "low bit" "high bit"
1701 * v v
1702 * data_A_0 data_B_0 data_C_0
1703 * data_A_1 data_B_0 data_C_0
1704 * data_A_0 data_B_1 data_C_0
1705 * data_A_1 data_B_1 data_C_0
1706 * data_A_0 data_B_0 data_C_1
1707 * data_A_1 data_B_0 data_C_1
1708 * data_A_0 data_B_1 data_C_1
1709 * data_A_1 data_B_1 data_C_1
1710 *
1711 * Note that the split segments may be on the same or different top-level
1712 * vdevs. In either case, we may need to try lots of combinations (see
1713 * zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror
1714 * has small silent errors on all of its children, we can still reconstruct
1715 * the correct data, as long as those errors are at sufficiently-separated
1716 * offsets (specifically, separated by the largest block size - default of
1717 * 128KB, but up to 16MB).
1718 */
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	boolean_t known_good = B_FALSE;
	int error;

	iv->iv_unique_combinations = 1;
	iv->iv_attempts_max = UINT64_MAX;

	if (zfs_reconstruct_indirect_combinations_max > 0)
		iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;

	/*
	 * If nonzero, every 1/x blocks will be damaged, in order to validate
	 * reconstruction when there are split segments with damaged copies.
	 * Known_good will be TRUE when reconstruction is known to be possible.
	 */
	if (zfs_reconstruct_indirect_damage_fraction != 0 &&
	    random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
		known_good = (vdev_indirect_splits_damage(iv, zio) == 0);

	/*
	 * Determine the unique children for a split segment and add them
	 * to the is_unique_child list.  By restricting reconstruction
	 * to these children, only unique combinations will be considered.
	 * This can vastly reduce the search space when there are a large
	 * number of indirect splits.
	 */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic_i = &is->is_child[i];

			if (ic_i->ic_data == NULL ||
			    ic_i->ic_duplicate != NULL)
				continue;

			for (int j = i + 1; j < is->is_children; j++) {
				indirect_child_t *ic_j = &is->is_child[j];

				if (ic_j->ic_data == NULL ||
				    ic_j->ic_duplicate != NULL)
					continue;

				if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
					ic_j->ic_duplicate = ic_i;
			}

			is->is_unique_children++;
			list_insert_tail(&is->is_unique_child, ic_i);
		}

		/* Reconstruction is impossible, no valid children */
		EQUIV(list_is_empty(&is->is_unique_child),
		    is->is_unique_children == 0);
		if (list_is_empty(&is->is_unique_child)) {
			zio->io_error = EIO;
			vdev_indirect_all_checksum_errors(zio);
			zio_checksum_verified(zio);
			return;
		}

		iv->iv_unique_combinations *= is->is_unique_children;
	}

	if (iv->iv_unique_combinations <= iv->iv_attempts_max)
		error = vdev_indirect_splits_enumerate_all(iv, zio);
	else
		error = vdev_indirect_splits_enumerate_randomly(iv, zio);

	if (error != 0) {
		/* All attempted combinations failed. */
		ASSERT3B(known_good, ==, B_FALSE);
		zio->io_error = error;
		vdev_indirect_all_checksum_errors(zio);
	} else {
		/*
		 * The checksum has been successfully validated.  Issue
		 * repair I/Os to any copies of splits which don't match
		 * the validated version.
		 */
		ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
		vdev_indirect_repair(zio);
		zio_checksum_verified(zio);
	}
}

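/*
 * Illustrative sketch (not part of the build): the duplicate detection
 * above, reduced to flat buffers.  buf[], is_dup[], n, and size are
 * hypothetical names for this example only.  Marking each later copy
 * that matches an earlier one means enumeration visits only the first
 * instance of every distinct version:
 *
 *	for (int i = 0; i < n; i++) {
 *		if (buf[i] == NULL || is_dup[i])
 *			continue;
 *		for (int j = i + 1; j < n; j++) {
 *			if (buf[j] != NULL && !is_dup[j] &&
 *			    memcmp(buf[i], buf[j], size) == 0)
 *				is_dup[j] = B_TRUE;
 *		}
 *	}
 */
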
static void
vdev_indirect_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (iv->iv_reconstruct) {
		/*
		 * We have read all copies of the data (e.g. from mirrors),
		 * either because this was a scrub/resilver, or because the
		 * one-copy read didn't checksum correctly.
		 */
		vdev_indirect_reconstruct_io_done(zio);
		return;
	}

	if (!iv->iv_split_block) {
		/*
		 * This was not a split block, so we passed the BP down,
		 * and the checksum was handled by the (one) child zio.
		 */
		return;
	}

	zio_bad_cksum_t zbc;
	int ret = zio_checksum_error(zio, &zbc);
	if (ret == 0) {
		zio_checksum_verified(zio);
		return;
	}

	/*
	 * The checksum didn't match.  Read all copies of all splits, and
	 * then we will try to reconstruct.  The next time
	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
	 */
	vdev_indirect_read_all(zio);

	zio_vdev_io_redone(zio);
}

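/*
 * Summary of the retry flow above (descriptive only, added for clarity):
 *
 *	pass 1: the reassembled split block is checksummed
 *	        ok   -> zio_checksum_verified(), done
 *	        fail -> vdev_indirect_read_all() + zio_vdev_io_redone()
 *	pass 2: iv_reconstruct is now set, so vdev_indirect_io_done()
 *	        hands off to vdev_indirect_reconstruct_io_done(), which
 *	        searches split-copy combinations for a valid checksum
 */
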
vdev_ops_t vdev_indirect_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_indirect_open,
	.vdev_op_close = vdev_indirect_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_indirect_io_start,
	.vdev_op_io_done = vdev_indirect_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = vdev_indirect_remap,
	.vdev_op_xlate = NULL,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* leaf vdev */
};

EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
	ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, INT,
	ZMOD_RW,
	"Minimum obsolete percent of bytes in the mapping "
	"to attempt condensing");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, ULONG, ZMOD_RW,
	"Don't bother condensing if the mapping uses less than this amount of "
	"memory");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, ULONG,
	ZMOD_RW,
	"Minimum size obsolete spacemap to attempt condensing");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
	INT, ZMOD_RW,
	"Used by tests to ensure certain actions happen in the middle of a "
	"condense. A maximum value of 1 should be sufficient.");

ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
	INT, ZMOD_RW,
	"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */
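
/*
 * Usage note (an assumption about the standard Linux module-parameter
 * interface, not taken from this file): ZFS_MODULE_PARAM concatenates
 * its prefix and name, so the last entry above should surface as
 * /sys/module/zfs/parameters/zfs_reconstruct_indirect_combinations_max
 * and, being ZMOD_RW, can be inspected or tuned at runtime, e.g.:
 *
 *	# cat /sys/module/zfs/parameters/zfs_reconstruct_indirect_combinations_max
 *	# echo 8192 > /sys/module/zfs/parameters/zfs_reconstruct_indirect_combinations_max
 */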