/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

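/*
 * Per-I/O state: a mirror_map_t fans a single logical I/O out over the
 * children of a mirror (or over the DVAs of a root/ditto read), and each
 * mirror_child_t tracks one child's target vdev, offset, load and result.
 */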
typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_replacing;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;

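/*
 * Shift applied to the I/O offset when it is used as a pseudo-random seed
 * for spreading reads across equally loaded preferred children (see
 * vdev_mirror_preferred_child_randomize()).
 */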
static int vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better
 * results, as it will direct more reads to the non-rotating vdevs,
 * which are likely to perform better.
 */

/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;

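/*
 * Size of a mirror map holding 'children' entries: the flexible mm_child[]
 * array plus the trailing int array used for mm_preferred.
 */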
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}

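/*
 * Allocate and initialize a mirror map for 'children' children; mm_preferred
 * points at the int array placed directly after the mm_child[] entries.
 */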
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_replacing = replacing;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

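/*
 * zio_vsd_ops free callback: release the mirror map attached to a zio.
 */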
static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	.vsd_free = vdev_mirror_map_free,
	.vsd_cksum_report = zio_vsd_default_cksum_report
};

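/*
 * Estimate the current load on a child vdev for the given I/O offset.
 * Lower values are better; vdev_mirror_child_select() uses the result to
 * build the set of preferred children for a read.
 */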
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t last_offset;
	int64_t offset_diff;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering (i.e.
	 * vdev_resilver_txg != 0) because in testing, overall performance
	 * was slightly worse when we did than when we did not.
	 */

	/* Fix zio_offset for leaf vdevs. */
	if (vd->vdev_ops->vdev_op_leaf)
		zio_offset += VDEV_LABEL_START_SIZE;

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	last_offset = vdev_queue_last_offset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (last_offset == zio_offset)
			return (load + zfs_vdev_mirror_non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices, as
		 * sequential I/O's can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (last_offset == zio_offset)
		return (load + zfs_vdev_mirror_rotating_inc);

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O issued to this vdev, as they should incur less
	 * of a seek increment.
	 */
	offset_diff = (int64_t)(last_offset - zio_offset);
	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset)
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}

/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
		    B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		mm = vdev_mirror_map_alloc(vd->vdev_children,
		    (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

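/*
 * Open all children and derive the mirror's asize, max_asize and ashift
 * from them.  Fails only if every child fails to open.
 */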
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;
	int c;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

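/*
 * Completion callback for an individual child I/O: record its result in
 * the corresponding mirror_child_t.
 */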
static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

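/*
 * Completion callback for scrub reads: on success, copy this child's data
 * into each parent zio's ABD, then free the per-child scrub buffer and
 * record the result.
 */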
static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	abd_free(zio->io_abd);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

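/*
 * Pick one child from the set of preferred (lowest-load) children.  Root
 * reads pick randomly among the preferred DVAs; otherwise the I/O offset
 * is used as a cheap pseudo-random seed.
 */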
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSD's, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}

/*
 * Try to find a child whose DTL doesn't contain the block we want to read,
 * preferring vdevs with a lower load as determined by vdev_mirror_load().
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1)
		return (mm->mm_preferred[0]);

	if (mm->mm_preferred_cnt > 1)
		return (vdev_mirror_preferred_child_randomize(zio));

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried)
			return (c);
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}

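/*
 * Issue the child I/Os for this zio: scrub reads go to every child with a
 * private buffer, normal reads go to a single selected child, and writes
 * go to all children.
 */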
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_abd in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}

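/*
 * Return the most severe error seen among the children, giving errors from
 * non-speculative I/Os precedence over speculative ones.
 */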
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int c, error[2] = { 0, 0 };

	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

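/*
 * Aggregate the results of the child I/Os: decide the overall error for
 * writes, retry failed reads on other children, and issue repair writes
 * to damaged children when a good copy is available.
 */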
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged
		 * children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

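/*
 * Update the mirror's state from the number of faulted and degraded
 * children: all faulted means no replicas, any faulted or degraded child
 * means degraded, otherwise healthy.
 */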
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

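/*
 * The mirror, replacing and spare vdev types all share this implementation;
 * they differ only in the vdev type name reported.
 */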
vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

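/*
 * Module parameters exposing the load-balancing tunables above (Linux
 * kernel builds only).
 */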
#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
	"Rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
	"Rotating media load increment for seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
	"Offset in bytes from the last I/O which "
	"triggers a reduced rotating media seek increment");

module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
	"Non-rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
	"Non-rotating media load increment for seeking I/O's");
/* END CSTYLED */
#endif