/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>

/*
 * Vdev mirror kstats
 */
static kstat_t *mirror_ksp = NULL;

typedef struct mirror_stats {
	kstat_named_t vdev_mirror_stat_rotating_linear;
	kstat_named_t vdev_mirror_stat_rotating_offset;
	kstat_named_t vdev_mirror_stat_rotating_seek;
	kstat_named_t vdev_mirror_stat_non_rotating_linear;
	kstat_named_t vdev_mirror_stat_non_rotating_seek;

	kstat_named_t vdev_mirror_stat_preferred_found;
	kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;

static mirror_stats_t mirror_stats = {
	/* New I/O follows directly the last I/O */
	{ "rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
	{ "rotating_offset", KSTAT_DATA_UINT64 },
	/* New I/O requires random seek */
	{ "rotating_seek", KSTAT_DATA_UINT64 },
	/* New I/O follows directly the last I/O (nonrot) */
	{ "non_rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O requires random seek (nonrot) */
	{ "non_rotating_seek", KSTAT_DATA_UINT64 },
	/* Preferred child vdev found */
	{ "preferred_found", KSTAT_DATA_UINT64 },
	/* Preferred child vdev not found or equal load */
	{ "preferred_not_found", KSTAT_DATA_UINT64 },
};

#define	MIRROR_STAT(stat)	(mirror_stats.stat.value.ui64)
#define	MIRROR_INCR(stat, val)	atomic_add_64(&MIRROR_STAT(stat), val)
#define	MIRROR_BUMP(stat)	MIRROR_INCR(stat, 1)

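/*
 * Example (illustrative): MIRROR_BUMP(vdev_mirror_stat_rotating_seek)
 * expands to an atomic_add_64() of 1 on the matching counter above, so the
 * hot I/O paths below can update statistics without any locking. On Linux
 * the installed kstat is typically visible as
 * /proc/spl/kstat/zfs/vdev_mirror_stats, assuming the usual SPL kstat
 * plumbing.
 */
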
void
vdev_mirror_stat_init(void)
{
	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (mirror_ksp != NULL) {
		mirror_ksp->ks_data = &mirror_stats;
		kstat_install(mirror_ksp);
	}
}

void
vdev_mirror_stat_fini(void)
{
	if (mirror_ksp != NULL) {
		kstat_delete(mirror_ksp);
		mirror_ksp = NULL;
	}
}

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_resilvering;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;

static int vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
 * as it will direct more reads to the non-rotating vdevs, which are more
 * likely to offer higher performance.
 */

/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;

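/*
 * Worked example with the defaults above (illustrative): a rotating child
 * with 4 pending I/Os is scored 4 + 0 = 4 for an I/O that directly follows
 * the last one, 4 + 5/2 = 6 for one within the 1 MiB seek offset, and
 * 4 + 5 = 9 for a full seek; see vdev_mirror_load() below.
 */
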
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

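/*
 * Layout sketch (illustrative, for children == 2): vdev_mirror_map_alloc()
 * makes a single allocation of
 *
 *	[ mirror_map_t | mm_child[0] | mm_child[1] | mm_preferred[0..1] ]
 *
 * with mm_preferred pointing just past the flexible mm_child[] array, which
 * is why vdev_mirror_map_size() adds sizeof (int) * children.
 */
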
static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	.vsd_free = vdev_mirror_map_free,
	.vsd_cksum_report = zio_vsd_default_cksum_report
};

static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t last_offset;
	int64_t offset_diff;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering
	 * (i.e. vdev_resilver_txg != 0) because, when tested, overall
	 * performance was slightly worse when resilvering devices were
	 * excluded than when they were not.
	 */

	/* Fix zio_offset for leaf vdevs */
	if (vd->vdev_ops->vdev_op_leaf)
		zio_offset += VDEV_LABEL_START_SIZE;

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	last_offset = vdev_queue_last_offset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (last_offset == zio_offset) {
			MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
			return (load + zfs_vdev_mirror_non_rotating_inc);
		}

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations on
		 * the device, thus avoiding unnecessary per-command overhead
		 * and boosting performance.
		 */
		MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (last_offset == zio_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
		return (load + zfs_vdev_mirror_rotating_inc);
	}

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O issued to this vdev as they should incur less
	 * of a seek increment.
	 */
	offset_diff = (int64_t)(last_offset - zio_offset);
	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
	}

	/* Apply the full seek increment to all other I/O's. */
	MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}

/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
		dva_t dva_copy[SPA_DVAS_PER_BP];

		/*
		 * The sequential scrub code sorts and issues all DVAs
		 * of a bp separately. Each of these IOs includes all
		 * original DVA copies so that repairs can be performed
		 * in the event of an error, but we only actually want
		 * to check the first DVA since the others will be
		 * checked by their respective sorted IOs. Only if we
		 * hit an error will we try all DVAs upon retrying.
		 *
		 * Note: This check is safe even if the user switches
		 * from a legacy scrub to a sequential one in the middle
		 * of processing, since scn_is_sorted isn't updated until
		 * all outstanding IOs from the previous scrub pass
		 * complete.
		 */
		if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
		    !(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
		    dsl_scan_scrubbing(spa->spa_dsl_pool) &&
		    scn->scn_is_sorted) {
			c = 1;
		} else {
			c = BP_GET_NDVAS(zio->io_bp);
		}

		/*
		 * If we do not trust the pool config, some DVAs might be
		 * invalid or point to vdevs that do not exist. We skip them.
		 */
		if (!spa_trust_config(spa)) {
			ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
			int j = 0;
			for (int i = 0; i < c; i++) {
				if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
					dva_copy[j++] = dva[i];
			}
			if (j == 0) {
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
			if (j < c) {
				dva = dva_copy;
				c = j;
			}
		}

		mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		/*
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (i.e. our name is "replacing-1"
		 *    or "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool. That behavior is intentional.
		 * It helps implement the policy that a spare should be
		 * automatically removed from the pool after the user replaces
		 * the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized. But we shouldn't be resilvering during a spa
		 * load anyway.
		 */
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		    B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

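/*
 * Summary note (illustrative): vdev_mirror_map_init() therefore builds one
 * of two map shapes -- a root map whose "children" are the block pointer's
 * DVA copies (vd == NULL, used for ditto blocks), or an ordinary map with
 * one entry per child vdev of this mirror, all sharing zio->io_offset.
 */
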
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		if (vdev_children_are_offline(vd))
			vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
		else
			vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

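/*
 * Note on the size math above (illustrative): *asize starts out as 0, and
 * since the arithmetic is unsigned, "MIN(*asize - 1, cvd->vdev_asize - 1)
 * + 1" wraps 0 - 1 to UINT64_MAX so the first opened child always wins;
 * afterwards the mirror's usable size tracks its smallest child while
 * ashift tracks the largest.
 */
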
static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	abd_free(zio->io_abd);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked. If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSD's, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}

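/*
 * Example (illustrative): with vdev_mirror_shift = 21, the expression above
 * buckets I/O offsets into 2 MiB (1 << 21) regions, so nearby reads keep
 * hitting the same low-load child while distant regions rotate across the
 * tied children.
 */
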
/*
 * Try to find a child whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load. If we can't, try the read on
 * any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_found);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
		return (vdev_mirror_preferred_child_randomize(zio));
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried)
			return (c);
	}

	/*
	 * Every child failed. There's no place left to look.
	 */
	return (-1);
}

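/*
 * Example (illustrative): if three eligible children report loads
 * { 3, 2, 2 }, the loop above resets the preferred list when it sees the
 * first load of 2 and ends with mm_preferred = { 1, 2 }, so
 * vdev_mirror_preferred_child_randomize() picks between children 1 and 2.
 */
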
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (mm == NULL) {
		ASSERT(!spa_trust_config(zio->io_spa));
		ASSERT(zio->io_type == ZIO_TYPE_READ);
		zio_execute(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_bp != NULL &&
		    (zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
			/*
			 * For scrubbing reads (if we can verify the
			 * checksum here, as indicated by io_bp being
			 * non-NULL) we need to allocate a read buffer for
			 * each child and issue reads to all children. If
			 * any child succeeds, it will copy its data into
			 * zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}

static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

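/*
 * Note (illustrative): the two error buckets above keep errors from
 * children we genuinely tried (error[0]) separate from speculative skips
 * such as the ESTALE set when a DTL says the data is missing (error[1]);
 * a real I/O failure is always reported in preference to a speculative one.
 */
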
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	if (mm == NULL)
		return;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device. But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				/*
				 * We didn't try this child. We need to
				 * repair it if:
				 * 1. it's a scrub (in which case we have
				 * tried everything that was healthy)
				 *  - or -
				 * 2. it's an indirect vdev (in which case
				 * it could point to any other vdev, which
				 * might have a bad DTL)
				 *  - or -
				 * 3. the DTL indicates that this data is
				 * missing from this vdev
				 */
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    mc->mc_vd->vdev_ops != &vdev_indirect_ops &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children) {
		if (vdev_children_are_offline(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
			    VDEV_AUX_CHILDREN_OFFLINE);
		} else {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_NO_REPLICAS);
		}
	} else if (degraded + faulted != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	} else {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
	}
}

vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	NULL,
	vdev_default_xlate,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	NULL,
	vdev_default_xlate,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	NULL,
	vdev_default_xlate,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

#if defined(_KERNEL)
/* BEGIN CSTYLED */
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
	"Rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
	"Rotating media load increment for seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
	"Offset in bytes from the last I/O which "
	"triggers a reduced rotating media seek increment");

module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
	"Non-rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
	"Non-rotating media load increment for seeking I/O's");
/* END CSTYLED */
#endif
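
/*
 * Usage note (illustrative): on Linux these module parameters can typically
 * be set at load time, e.g. "modprobe zfs zfs_vdev_mirror_rotating_seek_inc=10",
 * or adjusted at runtime under /sys/module/zfs/parameters/, assuming the
 * standard module parameter plumbing above.
 */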