/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
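/*
 * Mirror kstats, exported by kstat_create() below as the "zfs" module's
 * "vdev_mirror_stats" kstat and bumped from the I/O paths via the
 * MIRROR_BUMP()/MIRROR_INCR() macros.
 */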
static kstat_t *mirror_ksp = NULL;
typedef struct mirror_stats {
	kstat_named_t vdev_mirror_stat_rotating_linear;
	kstat_named_t vdev_mirror_stat_rotating_offset;
	kstat_named_t vdev_mirror_stat_rotating_seek;
	kstat_named_t vdev_mirror_stat_non_rotating_linear;
	kstat_named_t vdev_mirror_stat_non_rotating_seek;

	kstat_named_t vdev_mirror_stat_preferred_found;
	kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;
static mirror_stats_t mirror_stats = {
	/* New I/O follows directly the last I/O */
	{ "rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
	{ "rotating_offset", KSTAT_DATA_UINT64 },
	/* New I/O requires random seek */
	{ "rotating_seek", KSTAT_DATA_UINT64 },
	/* New I/O follows directly the last I/O (nonrot) */
	{ "non_rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O requires random seek (nonrot) */
	{ "non_rotating_seek", KSTAT_DATA_UINT64 },
	/* Preferred child vdev found */
	{ "preferred_found", KSTAT_DATA_UINT64 },
	/* Preferred child vdev not found or equal load */
	{ "preferred_not_found", KSTAT_DATA_UINT64 },
};
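/*
 * Accessors for the counters above; MIRROR_INCR() uses atomic_add_64(),
 * so updates are safe from concurrent I/O completion paths.
 */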
#define	MIRROR_STAT(stat)	(mirror_stats.stat.value.ui64)
#define	MIRROR_INCR(stat, val)	atomic_add_64(&MIRROR_STAT(stat), val)
#define	MIRROR_BUMP(stat)	MIRROR_INCR(stat, 1)
void
vdev_mirror_stat_init(void)
{
	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (mirror_ksp != NULL) {
		mirror_ksp->ks_data = &mirror_stats;
		kstat_install(mirror_ksp);
	}
}
void
vdev_mirror_stat_fini(void)
{
	if (mirror_ksp != NULL) {
		kstat_delete(mirror_ksp);
		mirror_ksp = NULL;
	}
}
/*
 * Virtual device vector for mirroring.
 */
typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;	/* Queue length */
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;
typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_resilvering;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;
static int vdev_mirror_shift = 21;
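/*
 * vdev_mirror_shift scales zio->io_offset into a pseudo-random index over
 * the lowest-load children in vdev_mirror_preferred_child_randomize() below.
 */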
/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
 * as it will direct more reads to the non-rotating vdevs which are more likely
 * to have higher performance.
 */
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;
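/*
 * A child's load is its pending queue length plus one of the increments
 * above, selected by rotational type and by how far the new I/O lands
 * from the last I/O issued to that child; see vdev_mirror_load() below.
 */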
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}
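/*
 * A mirror map is a single allocation: the mirror_map_t header, the
 * flexible mm_child[] array, and then the mm_preferred index array that
 * vdev_mirror_map_alloc() points mm_preferred at.
 */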
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	.vsd_free = vdev_mirror_map_free,
	.vsd_cksum_report = zio_vsd_default_cksum_report,
};
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t last_offset;
	int64_t offset_diff;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering i.e.
	 * vdev_resilver_txg != 0 as when tested performance was slightly
	 * worse overall when resilvering compared to without.
	 */

	/* Fix zio_offset for leaf vdevs */
	if (vd->vdev_ops->vdev_op_leaf)
		zio_offset += VDEV_LABEL_START_SIZE;

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	last_offset = vdev_queue_last_offset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (last_offset == zio_offset) {
			MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
			return (load + zfs_vdev_mirror_non_rotating_inc);
		}

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations on
		 * the device, thus avoiding unnecessary per-command overhead
		 * and boosting performance.
		 */
		MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (last_offset == zio_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
		return (load + zfs_vdev_mirror_rotating_inc);
	}

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O issued to this vdev as they should incur less
	 * of a seek increment.
	 */
	offset_diff = (int64_t)(last_offset - zio_offset);
	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
	}

	/* Apply the full seek increment to all other I/O's. */
	MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}
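/*
 * Example with the default tunables: a rotating child with 4 queued I/Os
 * and a new I/O 512KB from its last one scores 4 + 5/2 (integer) = 6,
 * while a sibling with 2 queued I/Os and a contiguous offset scores
 * 2 + 0 = 2 and would be preferred.
 */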
/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
		dva_t dva_copy[SPA_DVAS_PER_BP];

		/*
		 * The sequential scrub code sorts and issues all DVAs
		 * of a bp separately. Each of these IOs includes all
		 * original DVA copies so that repairs can be performed
		 * in the event of an error, but we only actually want
		 * to check the first DVA since the others will be
		 * checked by their respective sorted IOs. Only if we
		 * hit an error will we try all DVAs upon retrying.
		 *
		 * Note: This check is safe even if the user switches
		 * from a legacy scrub to a sequential one in the middle
		 * of processing, since scn_is_sorted isn't updated until
		 * all outstanding IOs from the previous scrub pass
		 * complete.
		 */
		if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
		    !(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
		    dsl_scan_scrubbing(spa->spa_dsl_pool) &&
		    scn->scn_is_sorted) {
			c = 1;
		} else {
			c = BP_GET_NDVAS(zio->io_bp);
		}

		/*
		 * If we do not trust the pool config, some DVAs might be
		 * invalid or point to vdevs that do not exist. We skip them.
		 */
		if (!spa_trust_config(spa)) {
			ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
			int j = 0;
			for (int i = 0; i < c; i++) {
				if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
					dva_copy[j++] = dva[i];
			}
			if (j == 0) {
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
			if (j < c) {
				dva = dva_copy;
				c = j;
			}
		}

		mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		/*
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (ie our name is "replacing-1" or
		 *    "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool. That behavior is intentional.
		 * It helps implement the policy that a spare should be
		 * automatically removed from the pool after the user replaces
		 * the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized. But we shouldn't be resilvering during a spa
		 * load anyway.
		 */
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		    B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}
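/*
 * Note: vdev_mirror_map_init() returns NULL only in the untrusted-config
 * path above, after setting zio->io_error; vdev_mirror_io_start() checks
 * for this before touching the map.
 */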
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		if (vdev_children_are_offline(vd))
			vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
		else
			vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}
static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	abd_free(zio->io_abd);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked. If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}

	return (preferred);
}
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSD's, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}
/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.
 *
 * Try to find a child whose DTL doesn't contain the block we want to read.
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_found);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
		return (vdev_mirror_preferred_child_randomize(zio));
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried)
			return (c);
	}

	/*
	 * Every child failed. There's no place left to look.
	 */
	return (-1);
}
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (mm == NULL) {
		ASSERT(!spa_trust_config(zio->io_spa));
		ASSERT(zio->io_type == ZIO_TYPE_READ);
		zio_execute(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_bp != NULL &&
		    (zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
			/*
			 * For scrubbing reads (if we can verify the
			 * checksum here, as indicated by io_bp being
			 * non-NULL) we need to allocate a read buffer for
			 * each child and issue reads to all children. If
			 * any child succeeds, it will copy its data into
			 * zio->io_abd in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}
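/*
 * error[0] collects errors from children that were actually tried;
 * error[1] collects the speculative ESTALE errors assigned to children
 * skipped because their DTL suggested the data was missing. A real
 * error is reported in preference to a speculative one.
 */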
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	if (mm == NULL)
		return;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device. But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				/*
				 * We didn't try this child. We need to
				 * repair it if:
				 * 1. it's a scrub (in which case we have
				 *    tried everything that was healthy)
				 *  or
				 * 2. it's an indirect vdev (in which case
				 *    it could point to any other vdev, which
				 *    might have a bad DTL)
				 *  or
				 * 3. the DTL indicates that this data is
				 *    missing from this vdev
				 */
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    mc->mc_vd->vdev_ops != &vdev_indirect_ops &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children) {
		if (vdev_children_are_offline(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
			    VDEV_AUX_CHILDREN_OFFLINE);
		} else {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_NO_REPLICAS);
		}
	} else if (degraded + faulted != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	} else {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
	}
}
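/*
 * "replacing" and "spare" vdevs reuse the mirror I/O paths: each is in
 * effect a mirror over the original device and its replacement (or the
 * activated hot spare), differing only in the vdev type name.
 */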
vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	NULL,
	vdev_default_xlate,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	NULL,
	vdev_default_xlate,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	NULL,
	vdev_default_xlate,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};
#if defined(_KERNEL)
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
	"Rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
	"Rotating media load increment for seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
	"Offset in bytes from the last I/O which "
	"triggers a reduced rotating media seek increment");

module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
	"Non-rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
	"Non-rotating media load increment for seeking I/O's");
#endif