/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
/*
 * Virtual device vector for mirroring.
 */
typedef struct mirror_child {
        vdev_t          *mc_vd;
        uint64_t        mc_offset;
        int             mc_error;
        int             mc_load;        /* weighted load of this child */
        uint8_t         mc_tried;
        uint8_t         mc_skipped;
        uint8_t         mc_speculative;
} mirror_child_t;
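/*
 * mc_speculative marks children that were skipped only because their DTL
 * contained the txg being read; vdev_mirror_worst_error() reports errors
 * from such children only when no non-speculative child reported an error.
 */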
typedef struct mirror_map {
        int             *mm_preferred;
        int             mm_preferred_cnt;
        int             mm_children;
        boolean_t       mm_replacing;
        boolean_t       mm_root;
        mirror_child_t  mm_child[];
} mirror_map_t;
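/*
 * When several children share the lowest load, the child used for a read
 * is derived from the I/O offset: offsets within the same
 * 1 << vdev_mirror_shift (2 MB) window map to the same preferred child;
 * see vdev_mirror_preferred_child_randomize().
 */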
static int vdev_mirror_shift = 21;
/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
 * as it will direct more reads to the non-rotating vdevs, which are likely
 * to deliver higher performance.
 */
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;
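/*
 * Worked example with the defaults above: a rotating child with 4 queued
 * I/Os scores a load of 4 + 0 = 4 for a read that directly follows the
 * previous one, 4 + (5 / 2) = 6 (integer division) for a read within 1 MB
 * of it, and 4 + 5 = 9 otherwise; a non-rotating child scores 4 and
 * 4 + 1 = 5 respectively. See vdev_mirror_load() below.
 */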
static inline size_t
vdev_mirror_map_size(int children)
{
        return (offsetof(mirror_map_t, mm_child[children]) +
            sizeof (int) * children);
}
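/*
 * A mirror map is a single allocation: the mirror_map_t with its flexible
 * mm_child[] array, followed by one int per child for mm_preferred. This
 * is why vdev_mirror_map_alloc() points mm_preferred just past mm_child[].
 */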
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
        mirror_map_t *mm;

        mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
        mm->mm_children = children;
        mm->mm_replacing = replacing;
        mm->mm_root = root;
        mm->mm_preferred = (int *)((uintptr_t)mm +
            offsetof(mirror_map_t, mm_child[children]));

        return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;

        kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
        .vsd_free = vdev_mirror_map_free,
        .vsd_cksum_report = zio_vsd_default_cksum_report,
};
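/*
 * Compute a load figure for a candidate child: lower is better. The figure
 * combines the child's pending queue length with a rotational/seek penalty;
 * vdev_mirror_child_select() keeps the children with the lowest load.
 */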
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
        uint64_t last_offset;
        int64_t offset_diff;
        int load;

        /* All DVAs have equal weight at the root. */
        if (mm->mm_root)
                return (INT_MAX);

        /*
         * We don't return INT_MAX if the device is resilvering (i.e.
         * vdev_resilver_txg != 0); when tested, overall performance was
         * slightly worse with that exclusion than without it.
         */

        /* Fix zio_offset for leaf vdevs. */
        if (vd->vdev_ops->vdev_op_leaf)
                zio_offset += VDEV_LABEL_START_SIZE;

        /* Standard load based on pending queue length. */
        load = vdev_queue_length(vd);
        last_offset = vdev_queue_last_offset(vd);

        if (vd->vdev_nonrot) {
                /* Non-rotating media. */
                if (last_offset == zio_offset)
                        return (load + zfs_vdev_mirror_non_rotating_inc);

                /*
                 * Apply a seek penalty even for non-rotating devices as
                 * sequential I/O's can be aggregated into fewer operations
                 * on the device, thus avoiding unnecessary per-command
                 * overhead and boosting performance.
                 */
                return (load + zfs_vdev_mirror_non_rotating_seek_inc);
        }

        /* Rotating media I/O's which directly follow the last I/O. */
        if (last_offset == zio_offset)
                return (load + zfs_vdev_mirror_rotating_inc);

        /*
         * Apply half the seek increment to I/O's within seek offset
         * of the last I/O issued to this vdev as they should incur less
         * of a seek increment.
         */
        offset_diff = (int64_t)(last_offset - zio_offset);
        if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset)
                return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));

        /* Apply the full seek increment to all other I/O's. */
        return (load + zfs_vdev_mirror_rotating_seek_inc);
}
/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
        mirror_map_t *mm = NULL;
        mirror_child_t *mc;
        vdev_t *vd = zio->io_vd;
        int c;

        if (vd == NULL) {
                dva_t *dva = zio->io_bp->blk_dva;
                spa_t *spa = zio->io_spa;

                mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
                    B_TRUE);
                for (c = 0; c < mm->mm_children; c++) {
                        mc = &mm->mm_child[c];

                        mc->mc_vd = vdev_lookup_top(spa,
                            DVA_GET_VDEV(&dva[c]));
                        mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
                }
        } else {
                mm = vdev_mirror_map_alloc(vd->vdev_children,
                    (vd->vdev_ops == &vdev_replacing_ops ||
                    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
                for (c = 0; c < mm->mm_children; c++) {
                        mc = &mm->mm_child[c];
                        mc->mc_vd = vd->vdev_child[c];
                        mc->mc_offset = zio->io_offset;
                }
        }

        zio->io_vsd = mm;
        zio->io_vsd_ops = &vdev_mirror_vsd_ops;
        return (mm);
}
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
        int numerrors = 0;
        int lasterror = 0;
        int c;

        if (vd->vdev_children == 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (SET_ERROR(EINVAL));
        }

        vdev_open_children(vd);

        for (c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];

                if (cvd->vdev_open_error) {
                        lasterror = cvd->vdev_open_error;
                        numerrors++;
                        continue;
                }

                *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
                *max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
                *ashift = MAX(*ashift, cvd->vdev_ashift);
        }

        if (numerrors == vd->vdev_children) {
                vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
                return (lasterror);
        }

        return (0);
}
static void
vdev_mirror_close(vdev_t *vd)
{
        int c;

        for (c = 0; c < vd->vdev_children; c++)
                vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
        mirror_child_t *mc = zio->io_private;

        mc->mc_error = zio->io_error;
        mc->mc_tried = 1;
        mc->mc_skipped = 0;
}
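/*
 * Scrub-read completion: each child read into its own private buffer, so
 * on success the data is copied up into every parent zio's abd before the
 * private buffer is freed.
 */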
static void
vdev_mirror_scrub_done(zio_t *zio)
{
        mirror_child_t *mc = zio->io_private;

        if (zio->io_error == 0) {
                zio_t *pio;
                zio_link_t *zl = NULL;

                mutex_enter(&zio->io_lock);
                while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
                        mutex_enter(&pio->io_lock);
                        ASSERT3U(zio->io_size, >=, pio->io_size);
                        abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
                        mutex_exit(&pio->io_lock);
                }
                mutex_exit(&zio->io_lock);
        }

        abd_free(zio->io_abd);

        mc->mc_error = zio->io_error;
        mc->mc_tried = 1;
        mc->mc_skipped = 0;
}
/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked. If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
        dva_t *dva = zio->io_bp->blk_dva;
        mirror_map_t *mm = zio->io_vsd;
        int preferred;
        int c;

        preferred = mm->mm_preferred[p];
        for (p--; p >= 0; p--) {
                c = mm->mm_preferred[p];
                if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
                        preferred = c;
        }
        return (preferred);
}
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        int p;

        if (mm->mm_root) {
                p = spa_get_random(mm->mm_preferred_cnt);
                return (vdev_mirror_dva_select(zio, p));
        }

        /*
         * To ensure we don't always favour the first matching vdev,
         * which could lead to wear leveling issues on SSD's, we
         * use the I/O offset as a pseudo random seed into the vdevs
         * which have the lowest load.
         */
        p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
        return (mm->mm_preferred[p]);
}
/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load. If we can't, try the read on
 * any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        mirror_child_t *mc;
        uint64_t txg = zio->io_txg;
        int c, lowest_load;

        ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

        lowest_load = INT_MAX;
        mm->mm_preferred_cnt = 0;
        for (c = 0; c < mm->mm_children; c++) {
                mc = &mm->mm_child[c];
                if (mc->mc_tried || mc->mc_skipped)
                        continue;

                if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
                        mc->mc_error = SET_ERROR(ENXIO);
                        mc->mc_tried = 1;       /* don't even try */
                        mc->mc_skipped = 1;
                        continue;
                }

                if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
                        mc->mc_error = SET_ERROR(ESTALE);
                        mc->mc_skipped = 1;
                        mc->mc_speculative = 1;
                        continue;
                }

                mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
                if (mc->mc_load > lowest_load)
                        continue;

                if (mc->mc_load < lowest_load) {
                        lowest_load = mc->mc_load;
                        mm->mm_preferred_cnt = 0;
                }
                mm->mm_preferred[mm->mm_preferred_cnt] = c;
                mm->mm_preferred_cnt++;
        }

        if (mm->mm_preferred_cnt == 1)
                return (mm->mm_preferred[0]);

        if (mm->mm_preferred_cnt > 1)
                return (vdev_mirror_preferred_child_randomize(zio));

        /*
         * Every device is either missing or has this txg in its DTL.
         * Look for any child we haven't already tried before giving up.
         */
        for (c = 0; c < mm->mm_children; c++) {
                if (!mm->mm_child[c].mc_tried)
                        return (c);
        }

        /*
         * Every child failed. There's no place left to look.
         */
        return (-1);
}
static void
vdev_mirror_io_start(zio_t *zio)
{
        mirror_map_t *mm;
        mirror_child_t *mc;
        int c, children;

        mm = vdev_mirror_map_init(zio);

        if (zio->io_type == ZIO_TYPE_READ) {
                if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
                        /*
                         * For scrubbing reads we need to allocate a read
                         * buffer for each child and issue reads to all
                         * children. If any child succeeds, it will copy its
                         * data into zio->io_abd in vdev_mirror_scrub_done.
                         */
                        for (c = 0; c < mm->mm_children; c++) {
                                mc = &mm->mm_child[c];
                                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                                    mc->mc_vd, mc->mc_offset,
                                    abd_alloc_sametype(zio->io_abd,
                                    zio->io_size), zio->io_size,
                                    zio->io_type, zio->io_priority, 0,
                                    vdev_mirror_scrub_done, mc));
                        }
                        zio_execute(zio);
                        return;
                }
                /*
                 * For normal reads just pick one child.
                 */
                c = vdev_mirror_child_select(zio);
                children = (c >= 0);
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);

                /*
                 * Writes go to all children.
                 */
                c = 0;
                children = mm->mm_children;
        }

        while (children--) {
                mc = &mm->mm_child[c];
                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
                    zio->io_type, zio->io_priority, 0,
                    vdev_mirror_child_done, mc));
                c++;
        }

        zio_execute(zio);
}
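/*
 * Pick the worst error across the children, preferring errors from
 * non-speculative children (error[0]) over those from children skipped
 * speculatively because of a DTL miss (error[1]).
 */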
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
        int c, error[2] = { 0, 0 };

        for (c = 0; c < mm->mm_children; c++) {
                mirror_child_t *mc = &mm->mm_child[c];
                int s = mc->mc_speculative;
                error[s] = zio_worst_error(error[s], mc->mc_error);
        }

        return (error[0] ? error[0] : error[1]);
}
static void
vdev_mirror_io_done(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        mirror_child_t *mc;
        int c;
        int good_copies = 0;
        int unexpected_errors = 0;

        for (c = 0; c < mm->mm_children; c++) {
                mc = &mm->mm_child[c];

                if (mc->mc_error) {
                        if (!mc->mc_skipped)
                                unexpected_errors++;
                } else if (mc->mc_tried) {
                        good_copies++;
                }
        }

        if (zio->io_type == ZIO_TYPE_WRITE) {
                /*
                 * XXX -- for now, treat partial writes as success.
                 *
                 * Now that we support write reallocation, it would be better
                 * to treat partial failure as real failure unless there are
                 * no non-degraded top-level vdevs left, and not update DTLs
                 * if we intend to reallocate.
                 */
                if (good_copies != mm->mm_children) {
                        /*
                         * Always require at least one good copy.
                         *
                         * For ditto blocks (io_vd == NULL), require
                         * all copies to be good.
                         *
                         * XXX -- for replacing vdevs, there's no great
                         * answer. If the old device is really dead, we may
                         * not even be able to access it -- so we only want
                         * to require good writes to the new device. But if
                         * the new device turns out to be flaky, we want to
                         * be able to detach it -- which requires all writes
                         * to the old device to have succeeded.
                         */
                        if (good_copies == 0 || zio->io_vd == NULL)
                                zio->io_error = vdev_mirror_worst_error(mm);
                }
                return;
        }

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        /*
         * If we don't have a good copy yet, keep trying other children.
         */
        if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
                ASSERT(c >= 0 && c < mm->mm_children);
                mc = &mm->mm_child[c];
                zio_vdev_io_redone(zio);
                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
                    ZIO_TYPE_READ, zio->io_priority, 0,
                    vdev_mirror_child_done, mc));
                return;
        }

        if (good_copies == 0) {
                zio->io_error = vdev_mirror_worst_error(mm);
                ASSERT(zio->io_error != 0);
        }

        if (good_copies && spa_writeable(zio->io_spa) &&
            (unexpected_errors ||
            (zio->io_flags & ZIO_FLAG_RESILVER) ||
            ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
                /*
                 * Use the good data we have in hand to repair damaged
                 * children.
                 */
                for (c = 0; c < mm->mm_children; c++) {
                        /*
                         * Don't rewrite known good children.
                         * Not only is it unnecessary, it could
                         * actually be harmful: if the system lost
                         * power while rewriting the only good copy,
                         * there would be no good copies left!
                         */
                        mc = &mm->mm_child[c];

                        if (mc->mc_error == 0) {
                                if (mc->mc_tried)
                                        continue;
                                if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
                                    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
                                    zio->io_txg, 1))
                                        continue;
                                mc->mc_error = SET_ERROR(ESTALE);
                        }

                        zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                            mc->mc_vd, mc->mc_offset,
                            zio->io_abd, zio->io_size,
                            ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
                            ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
                            ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
                }
        }
}
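/*
 * The mirror can't open only when every child has faulted; any faulted or
 * degraded child otherwise leaves the mirror degraded.
 */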
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
        if (faulted == vd->vdev_children)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_NO_REPLICAS);
        else if (degraded + faulted != 0)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
                    VDEV_AUX_NONE);
        else
                vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY,
                    VDEV_AUX_NONE);
}
vdev_ops_t vdev_mirror_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        VDEV_TYPE_MIRROR,       /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};
vdev_ops_t vdev_replacing_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        VDEV_TYPE_REPLACING,    /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};
vdev_ops_t vdev_spare_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        VDEV_TYPE_SPARE,        /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
        "Rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
        "Rotating media load increment for seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
        "Offset in bytes from the last I/O which "
        "triggers a reduced rotating media seek increment");

module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
        "Non-rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
        "Non-rotating media load increment for seeking I/O's");
#endif