/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_replacing;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;

static int vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
 * as it will direct more reads to the non-rotating vdevs, which are likely
 * to deliver higher performance.
 */

/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;

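/*
 * Note: the tunables above are also exported as module parameters at the
 * bottom of this file with mode 0644, so on Linux builds they can normally
 * be adjusted at runtime (typically under /sys/module/zfs/parameters/) as
 * well as at module load time.
 */
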
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_replacing = replacing;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}
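
/*
 * Layout note: vdev_mirror_map_size() sizes a single allocation holding the
 * map header, the flexible mm_child[] array and the mm_preferred index
 * array back to back:
 *
 *	[ mirror_map_t | mm_child[0 .. children-1] | mm_preferred[0 .. children-1] ]
 *
 * which is why vdev_mirror_map_alloc() points mm_preferred just past
 * mm_child[children] rather than allocating it separately.
 */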

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};

static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t lastoffset;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering (i.e.
	 * vdev_resilver_txg != 0): in testing, overall performance was
	 * slightly worse with that special case than without it.
	 */

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	lastoffset = vdev_queue_lastoffset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (lastoffset == zio_offset)
			return (load + zfs_vdev_mirror_non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations on
		 * the device, thus avoiding unnecessary per-command overhead
		 * and boosting performance.
		 */
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (lastoffset == zio_offset)
		return (load + zfs_vdev_mirror_rotating_inc);

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O queued to this vdev as they should incur less
	 * of a seek increment.
	 */
	if (ABS(lastoffset - zio_offset) <
	    zfs_vdev_mirror_rotating_seek_offset)
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}
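
/*
 * Worked example of the load calculation above, using the default tunables:
 * a read issued to a rotating child with 4 I/O's already pending scores
 * 4 + 0 = 4 if it starts exactly at the last queued offset, 4 + 5/2 = 6 if
 * it lands within 1 MiB of it, and 4 + 5 = 9 otherwise; a non-rotating
 * child with the same queue depth scores 4 or 5.  The children tied for
 * the lowest score form the preferred set in vdev_mirror_child_select().
 */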

/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
		    B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		mm = vdev_mirror_map_alloc(vd->vdev_children,
		    (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}
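
/*
 * Note on the two cases above: when vd == NULL the zio was issued at the
 * pool root, so the "mirror" is the set of DVAs (ditto copies) of the block
 * pointer and each child maps to the top-level vdev holding one copy.
 * Otherwise this is an ordinary mirror (or replacing/spare) vdev and the
 * children are simply its child vdevs, all read at the same offset.
 */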

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;
	int c;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}
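
/*
 * Note on the MIN(*asize - 1, ...) + 1 idiom above: assuming the caller
 * (vdev_open()) passes *asize and *max_asize in as 0, the unsigned "- 1"
 * wraps to UINT64_MAX on the first pass, so the first healthy child simply
 * seeds the value; subsequent children then clamp it to the smallest child
 * size, while *ashift tracks the largest child ashift.
 */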

static void
vdev_mirror_close(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			bcopy(zio->io_data, pio->io_data, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}
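
/*
 * Note: the buffer freed above is the per-child scrub buffer allocated with
 * zio_buf_alloc() in vdev_mirror_io_start(); on a successful read its
 * contents have already been copied into each parent zio's io_data, so any
 * one good child is enough to satisfy a scrub read.
 */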

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSDs, we
	 * use the I/O offset as a pseudo-random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}
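
/*
 * Note: with vdev_mirror_shift = 21, reads that fall within the same
 * 2 MiB-aligned region deterministically pick the same entry in the
 * preferred set, while different regions spread across all of the equally
 * loaded children; this keeps adjacent reads on one disk without needing
 * any per-I/O random state.
 */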

/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.  If we can't, try the read on
 * any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		vdev_queue_register_lastoffset(
		    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		int c = vdev_mirror_preferred_child_randomize(zio);

		vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd, zio);
		return (c);
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried) {
			vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd,
			    zio);
			return (c);
		}
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}
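
/*
 * Selection summary: the loop above first skips children that are already
 * tried, unreadable, or missing this txg in their DTL, then collects every
 * remaining child tied for the lowest vdev_mirror_load() score into
 * mm_preferred[].  A single candidate is used directly, ties are broken by
 * vdev_mirror_preferred_child_randomize(), and only if no candidate exists
 * do we fall back to any untried child at all.
 */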

static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    zio_buf_alloc(zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}

static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int c, error[2] = { 0, 0 };

	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}
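
/*
 * Note: error[0] accumulates the worst error from children we actually
 * expected to hold the data, error[1] from speculative children (those
 * skipped because their DTL said the data was missing).  A real error is
 * always reported in preference to a speculative one.
 */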

static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_data, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
	"Rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
	"Rotating media load increment for seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
	"Offset in bytes from the last I/O which "
	"triggers a reduced rotating media seek increment");

module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
	"Non-rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
	"Non-rotating media load increment for seeking I/O's");

#endif