/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>

/*
 * Vdev mirror kstats
 */
static kstat_t *mirror_ksp = NULL;

typedef struct mirror_stats {
        kstat_named_t vdev_mirror_stat_rotating_linear;
        kstat_named_t vdev_mirror_stat_rotating_offset;
        kstat_named_t vdev_mirror_stat_rotating_seek;
        kstat_named_t vdev_mirror_stat_non_rotating_linear;
        kstat_named_t vdev_mirror_stat_non_rotating_seek;

        kstat_named_t vdev_mirror_stat_preferred_found;
        kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;

static mirror_stats_t mirror_stats = {
        /* New I/O follows directly the last I/O */
        { "rotating_linear",    KSTAT_DATA_UINT64 },
        /* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
        { "rotating_offset",    KSTAT_DATA_UINT64 },
        /* New I/O requires random seek */
        { "rotating_seek",      KSTAT_DATA_UINT64 },
        /* New I/O follows directly the last I/O (nonrot) */
        { "non_rotating_linear", KSTAT_DATA_UINT64 },
        /* New I/O requires random seek (nonrot) */
        { "non_rotating_seek",  KSTAT_DATA_UINT64 },
        /* Preferred child vdev found */
        { "preferred_found",    KSTAT_DATA_UINT64 },
        /* Preferred child vdev not found or equal load */
        { "preferred_not_found", KSTAT_DATA_UINT64 },
};

#define MIRROR_STAT(stat)       (mirror_stats.stat.value.ui64)
#define MIRROR_INCR(stat, val)  atomic_add_64(&MIRROR_STAT(stat), val)
#define MIRROR_BUMP(stat)       MIRROR_INCR(stat, 1)

void
vdev_mirror_stat_init(void)
{
        mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
            "misc", KSTAT_TYPE_NAMED,
            sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
        if (mirror_ksp != NULL) {
                mirror_ksp->ks_data = &mirror_stats;
                kstat_install(mirror_ksp);
        }
}

void
vdev_mirror_stat_fini(void)
{
        if (mirror_ksp != NULL) {
                kstat_delete(mirror_ksp);
                mirror_ksp = NULL;
        }
}

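/*
 * Added commentary (not in the original source): on Linux, named kstats
 * registered under the "zfs" module are typically exposed through the SPL
 * procfs interface, so the counters above can usually be inspected with:
 *
 *      cat /proc/spl/kstat/zfs/vdev_mirror_stats
 *
 * The exact path may vary by platform and SPL version.
 */
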
/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
        vdev_t          *mc_vd;
        uint64_t        mc_offset;
        int             mc_error;
        int             mc_load;
        uint8_t         mc_tried;
        uint8_t         mc_skipped;
        uint8_t         mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
        int             *mm_preferred;
        int             mm_preferred_cnt;
        int             mm_children;
        boolean_t       mm_replacing;
        boolean_t       mm_root;
        mirror_child_t  mm_child[];
} mirror_map_t;

static int vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better
 * results, as it will direct more reads to the non-rotating vdevs,
 * which are likely to offer higher performance.
 */

/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;

static inline size_t
vdev_mirror_map_size(int children)
{
        return (offsetof(mirror_map_t, mm_child[children]) +
            sizeof (int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
        mirror_map_t *mm;

        mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
        mm->mm_children = children;
        mm->mm_replacing = replacing;
        mm->mm_root = root;
        mm->mm_preferred = (int *)((uintptr_t)mm +
            offsetof(mirror_map_t, mm_child[children]));

        return (mm);
}

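/*
 * Added commentary (not in the original source): the map and its
 * mm_preferred array share a single allocation, laid out as:
 *
 *      [ mirror_map_t header | mm_child[0..children-1] | children ints ]
 *
 * which is why vdev_mirror_map_size() adds sizeof (int) * children to the
 * flexible-array offset, and why mm_preferred points just past
 * mm_child[children].
 */
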
static void
vdev_mirror_map_free(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;

        kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
        vdev_mirror_map_free,
        zio_vsd_default_cksum_report
};

static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
        uint64_t last_offset;
        int64_t offset_diff;
        int load;

        /* All DVAs have equal weight at the root. */
        if (mm->mm_root)
                return (INT_MAX);

        /*
         * We don't return INT_MAX if the device is resilvering (i.e.
         * vdev_resilver_txg != 0) because, when tested, overall resilver
         * performance was slightly worse with that penalty than without it.
         */

        /* Fix zio_offset for leaf vdevs. */
        if (vd->vdev_ops->vdev_op_leaf)
                zio_offset += VDEV_LABEL_START_SIZE;

        /* Standard load based on pending queue length. */
        load = vdev_queue_length(vd);
        last_offset = vdev_queue_last_offset(vd);

        if (vd->vdev_nonrot) {
                /* Non-rotating media. */
                if (last_offset == zio_offset) {
                        MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
                        return (load + zfs_vdev_mirror_non_rotating_inc);
                }

                /*
                 * Apply a seek penalty even for non-rotating devices as
                 * sequential I/O's can be aggregated into fewer operations on
                 * the device, thus avoiding unnecessary per-command overhead
                 * and boosting performance.
                 */
                MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
                return (load + zfs_vdev_mirror_non_rotating_seek_inc);
        }

        /* Rotating media I/O's which directly follow the last I/O. */
        if (last_offset == zio_offset) {
                MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
                return (load + zfs_vdev_mirror_rotating_inc);
        }

        /*
         * Apply half the seek increment to I/O's within seek offset
         * of the last I/O issued to this vdev as they should incur less
         * of a seek increment.
         */
        offset_diff = (int64_t)(last_offset - zio_offset);
        if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
                MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
                return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
        }

        /* Apply the full seek increment to all other I/O's. */
        MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
        return (load + zfs_vdev_mirror_rotating_seek_inc);
}

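/*
 * Added commentary (not in the original source), a worked example with
 * the default tunables above: a rotating child with 3 queued I/Os whose
 * last offset is 512KB away from the new I/O scores
 * 3 + (zfs_vdev_mirror_rotating_seek_inc / 2) = 3 + 2 = 5, while the
 * same child with a 2MB gap scores the full 3 + 5 = 8; an otherwise idle
 * non-rotating child always scores 0 (linear) or 1 (seek).
 */
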
/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
        mirror_map_t *mm = NULL;
        mirror_child_t *mc;
        vdev_t *vd = zio->io_vd;
        int c;

        if (vd == NULL) {
                dva_t *dva = zio->io_bp->blk_dva;
                spa_t *spa = zio->io_spa;

                mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
                    B_TRUE);
                for (c = 0; c < mm->mm_children; c++) {
                        mc = &mm->mm_child[c];

                        mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
                        mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
                }
        } else {
                mm = vdev_mirror_map_alloc(vd->vdev_children,
                    (vd->vdev_ops == &vdev_replacing_ops ||
                    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
                for (c = 0; c < mm->mm_children; c++) {
                        mc = &mm->mm_child[c];
                        mc->mc_vd = vd->vdev_child[c];
                        mc->mc_offset = zio->io_offset;
                }
        }

        zio->io_vsd = mm;
        zio->io_vsd_ops = &vdev_mirror_vsd_ops;
        return (mm);
}

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
        int numerrors = 0;
        int lasterror = 0;

        if (vd->vdev_children == 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (SET_ERROR(EINVAL));
        }

        vdev_open_children(vd);

        for (int c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];

                if (cvd->vdev_open_error) {
                        lasterror = cvd->vdev_open_error;
                        numerrors++;
                        continue;
                }

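                /*
                 * Added commentary (not in the original source): *asize and
                 * *max_asize arrive as 0, so the "- 1 ... + 1" below relies
                 * on unsigned underflow to let the first opened child simply
                 * initialize them; subsequent children then clamp the values
                 * to the smallest child's size.
                 */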
                *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
                *max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
                *ashift = MAX(*ashift, cvd->vdev_ashift);
        }

        if (numerrors == vd->vdev_children) {
                vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
                return (lasterror);
        }

        return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
        for (int c = 0; c < vd->vdev_children; c++)
                vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
        mirror_child_t *mc = zio->io_private;

        mc->mc_error = zio->io_error;
        mc->mc_tried = 1;
        mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
        mirror_child_t *mc = zio->io_private;

        if (zio->io_error == 0) {
                zio_t *pio;
                zio_link_t *zl = NULL;

                mutex_enter(&zio->io_lock);
                while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
                        mutex_enter(&pio->io_lock);
                        ASSERT3U(zio->io_size, >=, pio->io_size);
                        abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
                        mutex_exit(&pio->io_lock);
                }
                mutex_exit(&zio->io_lock);
        }

        abd_free(zio->io_abd);

        mc->mc_error = zio->io_error;
        mc->mc_tried = 1;
        mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
        dva_t *dva = zio->io_bp->blk_dva;
        mirror_map_t *mm = zio->io_vsd;
        int preferred;
        int c;

        preferred = mm->mm_preferred[p];
        for (p--; p >= 0; p--) {
                c = mm->mm_preferred[p];
                if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
                        preferred = c;
        }
        return (preferred);
}

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        int p;

        if (mm->mm_root) {
                p = spa_get_random(mm->mm_preferred_cnt);
                return (vdev_mirror_dva_select(zio, p));
        }

        /*
         * To ensure we don't always favour the first matching vdev,
         * which could lead to wear leveling issues on SSDs, we
         * use the I/O offset as a pseudo random seed into the vdevs
         * which have the lowest load.
         */
        p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
        return (mm->mm_preferred[p]);
}

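/*
 * Added commentary (not in the original source): with the default
 * vdev_mirror_shift of 21, (io_offset >> 21) changes once per 2MB of
 * offset, so reads within the same 2MB region deterministically pick the
 * same equally-loaded child while still spreading work across children
 * over the whole device.
 */
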
/*
 * Try to find a child whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on the determined load.  If we can't, try the
 * read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        uint64_t txg = zio->io_txg;
        int c, lowest_load;

        ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

        lowest_load = INT_MAX;
        mm->mm_preferred_cnt = 0;
        for (c = 0; c < mm->mm_children; c++) {
                mirror_child_t *mc;

                mc = &mm->mm_child[c];
                if (mc->mc_tried || mc->mc_skipped)
                        continue;

                if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
                        mc->mc_error = SET_ERROR(ENXIO);
                        mc->mc_tried = 1;       /* don't even try */
                        mc->mc_skipped = 1;
                        continue;
                }

                if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
                        mc->mc_error = SET_ERROR(ESTALE);
                        mc->mc_skipped = 1;
                        mc->mc_speculative = 1;
                        continue;
                }

                mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
                if (mc->mc_load > lowest_load)
                        continue;

                if (mc->mc_load < lowest_load) {
                        lowest_load = mc->mc_load;
                        mm->mm_preferred_cnt = 0;
                }
                mm->mm_preferred[mm->mm_preferred_cnt] = c;
                mm->mm_preferred_cnt++;
        }

        if (mm->mm_preferred_cnt == 1) {
                MIRROR_BUMP(vdev_mirror_stat_preferred_found);
                return (mm->mm_preferred[0]);
        }

        if (mm->mm_preferred_cnt > 1) {
                MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
                return (vdev_mirror_preferred_child_randomize(zio));
        }

        /*
         * Every device is either missing or has this txg in its DTL.
         * Look for any child we haven't already tried before giving up.
         */
        for (c = 0; c < mm->mm_children; c++) {
                if (!mm->mm_child[c].mc_tried)
                        return (c);
        }

        /*
         * Every child failed.  There's no place left to look.
         */
        return (-1);
}

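/*
 * Added commentary (not in the original source): for example, in a
 * two-way mirror with child loads 4 and 7, only child 0 survives the scan
 * above and is returned directly; had both children scored 4, both would
 * land in mm_preferred[] and vdev_mirror_preferred_child_randomize()
 * would break the tie.
 */
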
static void
vdev_mirror_io_start(zio_t *zio)
{
        mirror_map_t *mm;
        mirror_child_t *mc;
        int c, children;

        mm = vdev_mirror_map_init(zio);

        if (zio->io_type == ZIO_TYPE_READ) {
                if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
                        /*
                         * For scrubbing reads we need to allocate a read
                         * buffer for each child and issue reads to all
                         * children.  If any child succeeds, it will copy its
                         * data into zio->io_abd in vdev_mirror_scrub_done.
                         */
                        for (c = 0; c < mm->mm_children; c++) {
                                mc = &mm->mm_child[c];
                                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                                    mc->mc_vd, mc->mc_offset,
                                    abd_alloc_sametype(zio->io_abd,
                                    zio->io_size), zio->io_size,
                                    zio->io_type, zio->io_priority, 0,
                                    vdev_mirror_scrub_done, mc));
                        }
                        zio_execute(zio);
                        return;
                }
                /*
                 * For normal reads just pick one child.
                 */
                c = vdev_mirror_child_select(zio);
                children = (c >= 0);
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);

                /*
                 * Writes go to all children.
                 */
                c = 0;
                children = mm->mm_children;
        }

        while (children--) {
                mc = &mm->mm_child[c];
                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
                    zio->io_type, zio->io_priority, 0,
                    vdev_mirror_child_done, mc));
                c++;
        }

        zio_execute(zio);
}

static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
        int error[2] = { 0, 0 };

        for (int c = 0; c < mm->mm_children; c++) {
                mirror_child_t *mc = &mm->mm_child[c];
                int s = mc->mc_speculative;
                error[s] = zio_worst_error(error[s], mc->mc_error);
        }

        return (error[0] ? error[0] : error[1]);
}

static void
vdev_mirror_io_done(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        mirror_child_t *mc;
        int c;
        int good_copies = 0;
        int unexpected_errors = 0;

        for (c = 0; c < mm->mm_children; c++) {
                mc = &mm->mm_child[c];

                if (mc->mc_error) {
                        if (!mc->mc_skipped)
                                unexpected_errors++;
                } else if (mc->mc_tried) {
                        good_copies++;
                }
        }

        if (zio->io_type == ZIO_TYPE_WRITE) {
                /*
                 * XXX -- for now, treat partial writes as success.
                 *
                 * Now that we support write reallocation, it would be better
                 * to treat partial failure as real failure unless there are
                 * no non-degraded top-level vdevs left, and not update DTLs
                 * if we intend to reallocate.
                 */
                /* XXPOLICY */
                if (good_copies != mm->mm_children) {
                        /*
                         * Always require at least one good copy.
                         *
                         * For ditto blocks (io_vd == NULL), require
                         * all copies to be good.
                         *
                         * XXX -- for replacing vdevs, there's no great answer.
                         * If the old device is really dead, we may not even
                         * be able to access it -- so we only want to
                         * require good writes to the new device.  But if
                         * the new device turns out to be flaky, we want
                         * to be able to detach it -- which requires all
                         * writes to the old device to have succeeded.
                         */
                        if (good_copies == 0 || zio->io_vd == NULL)
                                zio->io_error = vdev_mirror_worst_error(mm);
                }
                return;
        }

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        /*
         * If we don't have a good copy yet, keep trying other children.
         */
        /* XXPOLICY */
        if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
                ASSERT(c >= 0 && c < mm->mm_children);
                mc = &mm->mm_child[c];
                zio_vdev_io_redone(zio);
                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
                    ZIO_TYPE_READ, zio->io_priority, 0,
                    vdev_mirror_child_done, mc));
                return;
        }

        /* XXPOLICY */
        if (good_copies == 0) {
                zio->io_error = vdev_mirror_worst_error(mm);
                ASSERT(zio->io_error != 0);
        }

        if (good_copies && spa_writeable(zio->io_spa) &&
            (unexpected_errors ||
            (zio->io_flags & ZIO_FLAG_RESILVER) ||
            ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
                /*
                 * Use the good data we have in hand to repair damaged children.
                 */
                for (c = 0; c < mm->mm_children; c++) {
                        /*
                         * Don't rewrite known good children.
                         * Not only is it unnecessary, it could
                         * actually be harmful: if the system lost
                         * power while rewriting the only good copy,
                         * there would be no good copies left!
                         */
                        mc = &mm->mm_child[c];

                        if (mc->mc_error == 0) {
                                if (mc->mc_tried)
                                        continue;
                                if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
                                    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
                                    zio->io_txg, 1))
                                        continue;
                                mc->mc_error = SET_ERROR(ESTALE);
                        }

                        zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                            mc->mc_vd, mc->mc_offset,
                            zio->io_abd, zio->io_size,
                            ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
                            ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
                            ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
                }
        }
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
        if (faulted == vd->vdev_children)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_NO_REPLICAS);
        else if (degraded + faulted != 0)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
        else
                vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_mirror_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        NULL,
        VDEV_TYPE_MIRROR,       /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        NULL,
        VDEV_TYPE_REPLACING,    /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        NULL,
        VDEV_TYPE_SPARE,        /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};

#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
        "Rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
        "Rotating media load increment for seeking I/O's");

module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
        "Offset in bytes from the last I/O which "
        "triggers a reduced rotating media seek increment");

module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
        "Non-rotating media load increment for non-seeking I/O's");

module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
        "Non-rotating media load increment for seeking I/O's");
/* END CSTYLED */
#endif
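
/*
 * Added commentary (not in the original source): when built as the zfs
 * kernel module on Linux, these tunables are usually adjustable at
 * runtime through sysfs, e.g.:
 *
 *	echo 0 > /sys/module/zfs/parameters/zfs_vdev_mirror_non_rotating_seek_inc
 *
 * which applies the mixed rotating/non-rotating suggestion from the
 * comment above the tunable definitions.
 */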