module/zfs/vdev.c (mirror_zfs.git, commit 13739017382ab0d02257b35d461323209063e792)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
26 */
27
28 #include <sys/zfs_context.h>
29 #include <sys/fm/fs/zfs.h>
30 #include <sys/spa.h>
31 #include <sys/spa_impl.h>
32 #include <sys/dmu.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/uberblock_impl.h>
36 #include <sys/metaslab.h>
37 #include <sys/metaslab_impl.h>
38 #include <sys/space_map.h>
39 #include <sys/space_reftree.h>
40 #include <sys/zio.h>
41 #include <sys/zap.h>
42 #include <sys/fs/zfs.h>
43 #include <sys/arc.h>
44 #include <sys/zil.h>
45 #include <sys/dsl_scan.h>
46 #include <sys/zvol.h>
47
48 /*
49 * When a vdev is added, it will be divided into approximately (but no
50 * more than) this number of metaslabs.
51 */
52 int metaslabs_per_vdev = 200;
53
54 /*
55 * Virtual device management.
56 */
57
58 static vdev_ops_t *vdev_ops_table[] = {
59 &vdev_root_ops,
60 &vdev_raidz_ops,
61 &vdev_mirror_ops,
62 &vdev_replacing_ops,
63 &vdev_spare_ops,
64 &vdev_disk_ops,
65 &vdev_file_ops,
66 &vdev_missing_ops,
67 &vdev_hole_ops,
68 NULL
69 };
70
71 /*
72 * Given a vdev type, return the appropriate ops vector.
73 */
74 static vdev_ops_t *
75 vdev_getops(const char *type)
76 {
77 vdev_ops_t *ops, **opspp;
78
79 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
80 if (strcmp(ops->vdev_op_type, type) == 0)
81 break;
82
83 return (ops);
84 }
85
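/*
 * For example, vdev_getops("mirror") walks the table above and returns
 * &vdev_mirror_ops, while an unrecognized type string falls off the end
 * of the table and yields NULL, which vdev_alloc() turns into EINVAL.
 */
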
86 /*
87 * Default asize function: return the MAX of psize with the asize of
88 * all children. This is what's used by anything other than RAID-Z.
89 */
90 uint64_t
91 vdev_default_asize(vdev_t *vd, uint64_t psize)
92 {
93 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
94 uint64_t csize;
95 int c;
96
97 for (c = 0; c < vd->vdev_children; c++) {
98 csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
99 asize = MAX(asize, csize);
100 }
101
102 return (asize);
103 }
104
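/*
 * For example, with a top-level ashift of 12 (4 KiB sectors) a psize of
 * 5000 bytes rounds up to an asize of 8192; for an interior vdev such as
 * a mirror the result is the largest asize any child would need for the
 * same psize.
 */
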
105 /*
106 * Get the minimum allocatable size. We define the allocatable size as
107 * the vdev's asize rounded to the nearest metaslab. This allows us to
108 * replace or attach devices which don't have the same physical size but
109 * can still satisfy the same number of allocations.
110 */
111 uint64_t
112 vdev_get_min_asize(vdev_t *vd)
113 {
114 vdev_t *pvd = vd->vdev_parent;
115
116 /*
117 * If our parent is NULL (inactive spare or cache) or is the root,
118 * just return our own asize.
119 */
120 if (pvd == NULL)
121 return (vd->vdev_asize);
122
123 /*
124 * The top-level vdev just returns the allocatable size rounded
125 * to the nearest metaslab.
126 */
127 if (vd == vd->vdev_top)
128 return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
129
130 /*
131 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
132 * so each child must provide at least 1/Nth of its asize.
133 */
134 if (pvd->vdev_ops == &vdev_raidz_ops)
135 return (pvd->vdev_min_asize / pvd->vdev_children);
136
137 return (pvd->vdev_min_asize);
138 }
139
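/*
 * Example: a six-child raidz2 top-level vdev whose min_asize works out to
 * 6 GiB requires each child to supply at least 1 GiB (one sixth); a
 * replacement leaf smaller than that fails the min_asize check in
 * vdev_open().
 */
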
140 void
141 vdev_set_min_asize(vdev_t *vd)
142 {
143 int c;
144 vd->vdev_min_asize = vdev_get_min_asize(vd);
145
146 for (c = 0; c < vd->vdev_children; c++)
147 vdev_set_min_asize(vd->vdev_child[c]);
148 }
149
150 vdev_t *
151 vdev_lookup_top(spa_t *spa, uint64_t vdev)
152 {
153 vdev_t *rvd = spa->spa_root_vdev;
154
155 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
156
157 if (vdev < rvd->vdev_children) {
158 ASSERT(rvd->vdev_child[vdev] != NULL);
159 return (rvd->vdev_child[vdev]);
160 }
161
162 return (NULL);
163 }
164
165 vdev_t *
166 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
167 {
168 vdev_t *mvd;
169 int c;
170
171 if (vd->vdev_guid == guid)
172 return (vd);
173
174 for (c = 0; c < vd->vdev_children; c++)
175 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
176 NULL)
177 return (mvd);
178
179 return (NULL);
180 }
181
182 static int
183 vdev_count_leaves_impl(vdev_t *vd)
184 {
185 int n = 0;
186 int c;
187
188 if (vd->vdev_ops->vdev_op_leaf)
189 return (1);
190
191 for (c = 0; c < vd->vdev_children; c++)
192 n += vdev_count_leaves_impl(vd->vdev_child[c]);
193
194 return (n);
195 }
196
197 int
198 vdev_count_leaves(spa_t *spa)
199 {
200 return (vdev_count_leaves_impl(spa->spa_root_vdev));
201 }
202
203 void
204 vdev_add_child(vdev_t *pvd, vdev_t *cvd)
205 {
206 size_t oldsize, newsize;
207 uint64_t id = cvd->vdev_id;
208 vdev_t **newchild;
209
210 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
211 ASSERT(cvd->vdev_parent == NULL);
212
213 cvd->vdev_parent = pvd;
214
215 if (pvd == NULL)
216 return;
217
218 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
219
220 oldsize = pvd->vdev_children * sizeof (vdev_t *);
221 pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
222 newsize = pvd->vdev_children * sizeof (vdev_t *);
223
224 newchild = kmem_alloc(newsize, KM_SLEEP);
225 if (pvd->vdev_child != NULL) {
226 bcopy(pvd->vdev_child, newchild, oldsize);
227 kmem_free(pvd->vdev_child, oldsize);
228 }
229
230 pvd->vdev_child = newchild;
231 pvd->vdev_child[id] = cvd;
232
233 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
234 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
235
236 /*
237 * Walk up all ancestors to update guid sum.
238 */
239 for (; pvd != NULL; pvd = pvd->vdev_parent)
240 pvd->vdev_guid_sum += cvd->vdev_guid_sum;
241 }
242
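/*
 * After the walk above every ancestor's vdev_guid_sum equals its own guid
 * plus the guids of everything below it.  With hypothetical guids, a root
 * vdev (10) over a mirror (20) with two disks (30 and 40) carries a guid
 * sum of 100, and the mirror carries 90; this sum is compared against the
 * one recorded on disk at import time to help catch missing or swapped
 * children.
 */
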
243 void
244 vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
245 {
246 int c;
247 uint_t id = cvd->vdev_id;
248
249 ASSERT(cvd->vdev_parent == pvd);
250
251 if (pvd == NULL)
252 return;
253
254 ASSERT(id < pvd->vdev_children);
255 ASSERT(pvd->vdev_child[id] == cvd);
256
257 pvd->vdev_child[id] = NULL;
258 cvd->vdev_parent = NULL;
259
260 for (c = 0; c < pvd->vdev_children; c++)
261 if (pvd->vdev_child[c])
262 break;
263
264 if (c == pvd->vdev_children) {
265 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
266 pvd->vdev_child = NULL;
267 pvd->vdev_children = 0;
268 }
269
270 /*
271 * Walk up all ancestors to update guid sum.
272 */
273 for (; pvd != NULL; pvd = pvd->vdev_parent)
274 pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
275 }
276
277 /*
278 * Remove any holes in the child array.
279 */
280 void
281 vdev_compact_children(vdev_t *pvd)
282 {
283 vdev_t **newchild, *cvd;
284 int oldc = pvd->vdev_children;
285 int newc;
286 int c;
287
288 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
289
290 for (c = newc = 0; c < oldc; c++)
291 if (pvd->vdev_child[c])
292 newc++;
293
294 newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
295
296 for (c = newc = 0; c < oldc; c++) {
297 if ((cvd = pvd->vdev_child[c]) != NULL) {
298 newchild[newc] = cvd;
299 cvd->vdev_id = newc++;
300 }
301 }
302
303 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
304 pvd->vdev_child = newchild;
305 pvd->vdev_children = newc;
306 }
307
308 /*
309 * Allocate and minimally initialize a vdev_t.
310 */
311 vdev_t *
312 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
313 {
314 vdev_t *vd;
315 int t;
316
317 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
318
319 if (spa->spa_root_vdev == NULL) {
320 ASSERT(ops == &vdev_root_ops);
321 spa->spa_root_vdev = vd;
322 spa->spa_load_guid = spa_generate_guid(NULL);
323 }
324
325 if (guid == 0 && ops != &vdev_hole_ops) {
326 if (spa->spa_root_vdev == vd) {
327 /*
328 * The root vdev's guid will also be the pool guid,
329 * which must be unique among all pools.
330 */
331 guid = spa_generate_guid(NULL);
332 } else {
333 /*
334 * Any other vdev's guid must be unique within the pool.
335 */
336 guid = spa_generate_guid(spa);
337 }
338 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
339 }
340
341 vd->vdev_spa = spa;
342 vd->vdev_id = id;
343 vd->vdev_guid = guid;
344 vd->vdev_guid_sum = guid;
345 vd->vdev_ops = ops;
346 vd->vdev_state = VDEV_STATE_CLOSED;
347 vd->vdev_ishole = (ops == &vdev_hole_ops);
348
349 list_link_init(&vd->vdev_config_dirty_node);
350 list_link_init(&vd->vdev_state_dirty_node);
351 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
352 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
353 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
354 for (t = 0; t < DTL_TYPES; t++) {
355 vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
356 &vd->vdev_dtl_lock);
357 }
358 txg_list_create(&vd->vdev_ms_list,
359 offsetof(struct metaslab, ms_txg_node));
360 txg_list_create(&vd->vdev_dtl_list,
361 offsetof(struct vdev, vdev_dtl_node));
362 vd->vdev_stat.vs_timestamp = gethrtime();
363 vdev_queue_init(vd);
364 vdev_cache_init(vd);
365
366 return (vd);
367 }
368
369 /*
370 * Allocate a new vdev. The 'alloctype' is used to control whether we are
371 * creating a new vdev or loading an existing one - the behavior is slightly
372 * different for each case.
373 */
374 int
375 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
376 int alloctype)
377 {
378 vdev_ops_t *ops;
379 char *type;
380 uint64_t guid = 0, islog, nparity;
381 vdev_t *vd;
382
383 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
384
385 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
386 return (SET_ERROR(EINVAL));
387
388 if ((ops = vdev_getops(type)) == NULL)
389 return (SET_ERROR(EINVAL));
390
391 /*
392 * If this is a load, get the vdev guid from the nvlist.
393 * Otherwise, vdev_alloc_common() will generate one for us.
394 */
395 if (alloctype == VDEV_ALLOC_LOAD) {
396 uint64_t label_id;
397
398 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
399 label_id != id)
400 return (SET_ERROR(EINVAL));
401
402 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
403 return (SET_ERROR(EINVAL));
404 } else if (alloctype == VDEV_ALLOC_SPARE) {
405 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
406 return (SET_ERROR(EINVAL));
407 } else if (alloctype == VDEV_ALLOC_L2CACHE) {
408 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
409 return (SET_ERROR(EINVAL));
410 } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
411 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
412 return (SET_ERROR(EINVAL));
413 }
414
415 /*
416 * The first allocated vdev must be of type 'root'.
417 */
418 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
419 return (SET_ERROR(EINVAL));
420
421 /*
422 * Determine whether we're a log vdev.
423 */
424 islog = 0;
425 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
426 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
427 return (SET_ERROR(ENOTSUP));
428
429 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
430 return (SET_ERROR(ENOTSUP));
431
432 /*
433 * Set the nparity property for RAID-Z vdevs.
434 */
435 nparity = -1ULL;
436 if (ops == &vdev_raidz_ops) {
437 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
438 &nparity) == 0) {
439 if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
440 return (SET_ERROR(EINVAL));
441 /*
442 * Previous versions could only support 1 or 2 parity
443 * devices.
444 */
445 if (nparity > 1 &&
446 spa_version(spa) < SPA_VERSION_RAIDZ2)
447 return (SET_ERROR(ENOTSUP));
448 if (nparity > 2 &&
449 spa_version(spa) < SPA_VERSION_RAIDZ3)
450 return (SET_ERROR(ENOTSUP));
451 } else {
452 /*
453 * We require the parity to be specified for SPAs that
454 * support multiple parity levels.
455 */
456 if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
457 return (SET_ERROR(EINVAL));
458 /*
459 * Otherwise, we default to 1 parity device for RAID-Z.
460 */
461 nparity = 1;
462 }
463 } else {
464 nparity = 0;
465 }
466 ASSERT(nparity != -1ULL);
467
468 vd = vdev_alloc_common(spa, id, guid, ops);
469
470 vd->vdev_islog = islog;
471 vd->vdev_nparity = nparity;
472
473 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
474 vd->vdev_path = spa_strdup(vd->vdev_path);
475 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
476 vd->vdev_devid = spa_strdup(vd->vdev_devid);
477 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
478 &vd->vdev_physpath) == 0)
479 vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
480 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
481 vd->vdev_fru = spa_strdup(vd->vdev_fru);
482
483 /*
484 * Set the whole_disk property. If it's not specified, leave the value
485 * as -1.
486 */
487 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
488 &vd->vdev_wholedisk) != 0)
489 vd->vdev_wholedisk = -1ULL;
490
491 /*
492 * Look for the 'not present' flag. This will only be set if the device
493 * was not present at the time of import.
494 */
495 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
496 &vd->vdev_not_present);
497
498 /*
499 * Get the alignment requirement.
500 */
501 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
502
503 /*
504 * Retrieve the vdev creation time.
505 */
506 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
507 &vd->vdev_crtxg);
508
509 /*
510 * If we're a top-level vdev, try to load the allocation parameters.
511 */
512 if (parent && !parent->vdev_parent &&
513 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
514 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
515 &vd->vdev_ms_array);
516 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
517 &vd->vdev_ms_shift);
518 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
519 &vd->vdev_asize);
520 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
521 &vd->vdev_removing);
522 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
523 &vd->vdev_top_zap);
524 } else {
525 ASSERT0(vd->vdev_top_zap);
526 }
527
528 if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
529 ASSERT(alloctype == VDEV_ALLOC_LOAD ||
530 alloctype == VDEV_ALLOC_ADD ||
531 alloctype == VDEV_ALLOC_SPLIT ||
532 alloctype == VDEV_ALLOC_ROOTPOOL);
533 vd->vdev_mg = metaslab_group_create(islog ?
534 spa_log_class(spa) : spa_normal_class(spa), vd);
535 }
536
537 if (vd->vdev_ops->vdev_op_leaf &&
538 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
539 (void) nvlist_lookup_uint64(nv,
540 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
541 } else {
542 ASSERT0(vd->vdev_leaf_zap);
543 }
544
545 /*
546 * If we're a leaf vdev, try to load the DTL object and other state.
547 */
548
549 if (vd->vdev_ops->vdev_op_leaf &&
550 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
551 alloctype == VDEV_ALLOC_ROOTPOOL)) {
552 if (alloctype == VDEV_ALLOC_LOAD) {
553 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
554 &vd->vdev_dtl_object);
555 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
556 &vd->vdev_unspare);
557 }
558
559 if (alloctype == VDEV_ALLOC_ROOTPOOL) {
560 uint64_t spare = 0;
561
562 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
563 &spare) == 0 && spare)
564 spa_spare_add(vd);
565 }
566
567 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
568 &vd->vdev_offline);
569
570 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
571 &vd->vdev_resilver_txg);
572
573 /*
574 * When importing a pool, we want to ignore the persistent fault
575 * state, as the diagnosis made on another system may not be
576 * valid in the current context. Local vdevs will
577 * remain in the faulted state.
578 */
579 if (spa_load_state(spa) == SPA_LOAD_OPEN) {
580 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
581 &vd->vdev_faulted);
582 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
583 &vd->vdev_degraded);
584 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
585 &vd->vdev_removed);
586
587 if (vd->vdev_faulted || vd->vdev_degraded) {
588 char *aux;
589
590 vd->vdev_label_aux =
591 VDEV_AUX_ERR_EXCEEDED;
592 if (nvlist_lookup_string(nv,
593 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
594 strcmp(aux, "external") == 0)
595 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
596 }
597 }
598 }
599
600 /*
601 * Add ourselves to the parent's list of children.
602 */
603 vdev_add_child(parent, vd);
604
605 *vdp = vd;
606
607 return (0);
608 }
609
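/*
 * Illustrative sketch (hypothetical values) of the nvlist a caller might
 * pass in for a leaf disk:
 *
 *	type="disk" id=0 path="/dev/sda1" ashift=12
 *
 * With VDEV_ALLOC_ADD no guid is supplied and vdev_alloc_common()
 * generates one; with VDEV_ALLOC_LOAD both ZPOOL_CONFIG_ID and
 * ZPOOL_CONFIG_GUID must be present (and the id must match), otherwise
 * EINVAL is returned.
 */
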
610 void
611 vdev_free(vdev_t *vd)
612 {
613 int c, t;
614 spa_t *spa = vd->vdev_spa;
615
616 /*
617 * vdev_free() implies closing the vdev first. This is simpler than
618 * trying to ensure complicated semantics for all callers.
619 */
620 vdev_close(vd);
621
622 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
623 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
624
625 /*
626 * Free all children.
627 */
628 for (c = 0; c < vd->vdev_children; c++)
629 vdev_free(vd->vdev_child[c]);
630
631 ASSERT(vd->vdev_child == NULL);
632 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
633
634 /*
635 * Discard allocation state.
636 */
637 if (vd->vdev_mg != NULL) {
638 vdev_metaslab_fini(vd);
639 metaslab_group_destroy(vd->vdev_mg);
640 }
641
642 ASSERT0(vd->vdev_stat.vs_space);
643 ASSERT0(vd->vdev_stat.vs_dspace);
644 ASSERT0(vd->vdev_stat.vs_alloc);
645
646 /*
647 * Remove this vdev from its parent's child list.
648 */
649 vdev_remove_child(vd->vdev_parent, vd);
650
651 ASSERT(vd->vdev_parent == NULL);
652
653 /*
654 * Clean up vdev structure.
655 */
656 vdev_queue_fini(vd);
657 vdev_cache_fini(vd);
658
659 if (vd->vdev_path)
660 spa_strfree(vd->vdev_path);
661 if (vd->vdev_devid)
662 spa_strfree(vd->vdev_devid);
663 if (vd->vdev_physpath)
664 spa_strfree(vd->vdev_physpath);
665 if (vd->vdev_fru)
666 spa_strfree(vd->vdev_fru);
667
668 if (vd->vdev_isspare)
669 spa_spare_remove(vd);
670 if (vd->vdev_isl2cache)
671 spa_l2cache_remove(vd);
672
673 txg_list_destroy(&vd->vdev_ms_list);
674 txg_list_destroy(&vd->vdev_dtl_list);
675
676 mutex_enter(&vd->vdev_dtl_lock);
677 space_map_close(vd->vdev_dtl_sm);
678 for (t = 0; t < DTL_TYPES; t++) {
679 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
680 range_tree_destroy(vd->vdev_dtl[t]);
681 }
682 mutex_exit(&vd->vdev_dtl_lock);
683
684 mutex_destroy(&vd->vdev_dtl_lock);
685 mutex_destroy(&vd->vdev_stat_lock);
686 mutex_destroy(&vd->vdev_probe_lock);
687
688 if (vd == spa->spa_root_vdev)
689 spa->spa_root_vdev = NULL;
690
691 kmem_free(vd, sizeof (vdev_t));
692 }
693
694 /*
695 * Transfer top-level vdev state from svd to tvd.
696 */
697 static void
698 vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
699 {
700 spa_t *spa = svd->vdev_spa;
701 metaslab_t *msp;
702 vdev_t *vd;
703 int t;
704
705 ASSERT(tvd == tvd->vdev_top);
706
707 tvd->vdev_ms_array = svd->vdev_ms_array;
708 tvd->vdev_ms_shift = svd->vdev_ms_shift;
709 tvd->vdev_ms_count = svd->vdev_ms_count;
710 tvd->vdev_top_zap = svd->vdev_top_zap;
711
712 svd->vdev_ms_array = 0;
713 svd->vdev_ms_shift = 0;
714 svd->vdev_ms_count = 0;
715 svd->vdev_top_zap = 0;
716
717 if (tvd->vdev_mg)
718 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
719 tvd->vdev_mg = svd->vdev_mg;
720 tvd->vdev_ms = svd->vdev_ms;
721
722 svd->vdev_mg = NULL;
723 svd->vdev_ms = NULL;
724
725 if (tvd->vdev_mg != NULL)
726 tvd->vdev_mg->mg_vd = tvd;
727
728 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
729 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
730 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
731
732 svd->vdev_stat.vs_alloc = 0;
733 svd->vdev_stat.vs_space = 0;
734 svd->vdev_stat.vs_dspace = 0;
735
736 for (t = 0; t < TXG_SIZE; t++) {
737 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
738 (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
739 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
740 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
741 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
742 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
743 }
744
745 if (list_link_active(&svd->vdev_config_dirty_node)) {
746 vdev_config_clean(svd);
747 vdev_config_dirty(tvd);
748 }
749
750 if (list_link_active(&svd->vdev_state_dirty_node)) {
751 vdev_state_clean(svd);
752 vdev_state_dirty(tvd);
753 }
754
755 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
756 svd->vdev_deflate_ratio = 0;
757
758 tvd->vdev_islog = svd->vdev_islog;
759 svd->vdev_islog = 0;
760 }
761
762 static void
763 vdev_top_update(vdev_t *tvd, vdev_t *vd)
764 {
765 int c;
766
767 if (vd == NULL)
768 return;
769
770 vd->vdev_top = tvd;
771
772 for (c = 0; c < vd->vdev_children; c++)
773 vdev_top_update(tvd, vd->vdev_child[c]);
774 }
775
776 /*
777 * Add a mirror/replacing vdev above an existing vdev.
778 */
779 vdev_t *
780 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
781 {
782 spa_t *spa = cvd->vdev_spa;
783 vdev_t *pvd = cvd->vdev_parent;
784 vdev_t *mvd;
785
786 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
787
788 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
789
790 mvd->vdev_asize = cvd->vdev_asize;
791 mvd->vdev_min_asize = cvd->vdev_min_asize;
792 mvd->vdev_max_asize = cvd->vdev_max_asize;
793 mvd->vdev_ashift = cvd->vdev_ashift;
794 mvd->vdev_state = cvd->vdev_state;
795 mvd->vdev_crtxg = cvd->vdev_crtxg;
796
797 vdev_remove_child(pvd, cvd);
798 vdev_add_child(pvd, mvd);
799 cvd->vdev_id = mvd->vdev_children;
800 vdev_add_child(mvd, cvd);
801 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
802
803 if (mvd == mvd->vdev_top)
804 vdev_top_transfer(cvd, mvd);
805
806 return (mvd);
807 }
808
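/*
 * For instance, attaching a disk to an existing leaf goes through here:
 * the attach path inserts a "replacing" (or "mirror") interior vdev above
 * the old child, roughly
 *
 *	mvd = vdev_add_parent(oldvd, &vdev_replacing_ops);
 *
 * and both the old and new disks then hang off mvd while the resilver
 * completes.
 */
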
809 /*
810 * Remove a 1-way mirror/replacing vdev from the tree.
811 */
812 void
813 vdev_remove_parent(vdev_t *cvd)
814 {
815 vdev_t *mvd = cvd->vdev_parent;
816 vdev_t *pvd = mvd->vdev_parent;
817
818 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
819
820 ASSERT(mvd->vdev_children == 1);
821 ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
822 mvd->vdev_ops == &vdev_replacing_ops ||
823 mvd->vdev_ops == &vdev_spare_ops);
824 cvd->vdev_ashift = mvd->vdev_ashift;
825
826 vdev_remove_child(mvd, cvd);
827 vdev_remove_child(pvd, mvd);
828
829 /*
830 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
831 * Otherwise, we could have detached an offline device, and when we
832 * go to import the pool we'll think we have two top-level vdevs,
833 * instead of a different version of the same top-level vdev.
834 */
835 if (mvd->vdev_top == mvd) {
836 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
837 cvd->vdev_orig_guid = cvd->vdev_guid;
838 cvd->vdev_guid += guid_delta;
839 cvd->vdev_guid_sum += guid_delta;
840
841 /*
842 * If pool not set for autoexpand, we need to also preserve
843 * mvd's asize to prevent automatic expansion of cvd.
844 * Otherwise if we are adjusting the mirror by attaching and
845 * detaching children of non-uniform sizes, the mirror could
846 * autoexpand, unexpectedly requiring larger devices to
847 * re-establish the mirror.
848 */
849 if (!cvd->vdev_spa->spa_autoexpand)
850 cvd->vdev_asize = mvd->vdev_asize;
851 }
852 cvd->vdev_id = mvd->vdev_id;
853 vdev_add_child(pvd, cvd);
854 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
855
856 if (cvd == cvd->vdev_top)
857 vdev_top_transfer(mvd, cvd);
858
859 ASSERT(mvd->vdev_children == 0);
860 vdev_free(mvd);
861 }
862
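/*
 * Worked example of the guid preservation above (hypothetical numbers):
 * if mvd is a one-way mirror that is also a top-level vdev, with guid
 * 1000, and its sole child cvd has guid 400, then guid_delta is 600 and
 * cvd's guid becomes 1000 -- the top-level guid the pool's labels already
 * reference -- while the original 400 is remembered in vdev_orig_guid.
 */
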
863 int
864 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
865 {
866 spa_t *spa = vd->vdev_spa;
867 objset_t *mos = spa->spa_meta_objset;
868 uint64_t m;
869 uint64_t oldc = vd->vdev_ms_count;
870 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
871 metaslab_t **mspp;
872 int error;
873
874 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
875
876 /*
877 * This vdev is not being allocated from yet or is a hole.
878 */
879 if (vd->vdev_ms_shift == 0)
880 return (0);
881
882 ASSERT(!vd->vdev_ishole);
883
884 /*
885 * Compute the raidz-deflation ratio. Note, we hard-code
886 * in 128k (1 << 17) because it is the "typical" blocksize.
887 * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
888 * otherwise it would inconsistently account for existing bp's.
889 */
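/*
 * For a plain disk or mirror the asize of a 128K block is still 128K, so
 * the ratio is 131072 / (131072 >> 9) = 512; raidz vdevs, whose asize for
 * a 128K block also includes parity sectors, end up with a smaller ratio,
 * which is what lets "deflated" space accounting discount the parity
 * overhead.
 */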
890 vd->vdev_deflate_ratio = (1 << 17) /
891 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
892
893 ASSERT(oldc <= newc);
894
895 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
896
897 if (oldc != 0) {
898 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
899 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
900 }
901
902 vd->vdev_ms = mspp;
903 vd->vdev_ms_count = newc;
904
905 for (m = oldc; m < newc; m++) {
906 uint64_t object = 0;
907
908 if (txg == 0) {
909 error = dmu_read(mos, vd->vdev_ms_array,
910 m * sizeof (uint64_t), sizeof (uint64_t), &object,
911 DMU_READ_PREFETCH);
912 if (error)
913 return (error);
914 }
915
916 error = metaslab_init(vd->vdev_mg, m, object, txg,
917 &(vd->vdev_ms[m]));
918 if (error)
919 return (error);
920 }
921
922 if (txg == 0)
923 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
924
925 /*
926 * If the vdev is being removed we don't activate
927 * the metaslabs since we want to ensure that no new
928 * allocations are performed on this device.
929 */
930 if (oldc == 0 && !vd->vdev_removing)
931 metaslab_group_activate(vd->vdev_mg);
932
933 if (txg == 0)
934 spa_config_exit(spa, SCL_ALLOC, FTAG);
935
936 return (0);
937 }
938
939 void
940 vdev_metaslab_fini(vdev_t *vd)
941 {
942 uint64_t m;
943 uint64_t count = vd->vdev_ms_count;
944
945 if (vd->vdev_ms != NULL) {
946 metaslab_group_passivate(vd->vdev_mg);
947 for (m = 0; m < count; m++) {
948 metaslab_t *msp = vd->vdev_ms[m];
949
950 if (msp != NULL)
951 metaslab_fini(msp);
952 }
953 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
954 vd->vdev_ms = NULL;
955 }
956
957 ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
958 }
959
960 typedef struct vdev_probe_stats {
961 boolean_t vps_readable;
962 boolean_t vps_writeable;
963 int vps_flags;
964 } vdev_probe_stats_t;
965
966 static void
967 vdev_probe_done(zio_t *zio)
968 {
969 spa_t *spa = zio->io_spa;
970 vdev_t *vd = zio->io_vd;
971 vdev_probe_stats_t *vps = zio->io_private;
972
973 ASSERT(vd->vdev_probe_zio != NULL);
974
975 if (zio->io_type == ZIO_TYPE_READ) {
976 if (zio->io_error == 0)
977 vps->vps_readable = 1;
978 if (zio->io_error == 0 && spa_writeable(spa)) {
979 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
980 zio->io_offset, zio->io_size, zio->io_data,
981 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
982 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
983 } else {
984 zio_buf_free(zio->io_data, zio->io_size);
985 }
986 } else if (zio->io_type == ZIO_TYPE_WRITE) {
987 if (zio->io_error == 0)
988 vps->vps_writeable = 1;
989 zio_buf_free(zio->io_data, zio->io_size);
990 } else if (zio->io_type == ZIO_TYPE_NULL) {
991 zio_t *pio;
992
993 vd->vdev_cant_read |= !vps->vps_readable;
994 vd->vdev_cant_write |= !vps->vps_writeable;
995
996 if (vdev_readable(vd) &&
997 (vdev_writeable(vd) || !spa_writeable(spa))) {
998 zio->io_error = 0;
999 } else {
1000 ASSERT(zio->io_error != 0);
1001 zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
1002 spa, vd, NULL, 0, 0);
1003 zio->io_error = SET_ERROR(ENXIO);
1004 }
1005
1006 mutex_enter(&vd->vdev_probe_lock);
1007 ASSERT(vd->vdev_probe_zio == zio);
1008 vd->vdev_probe_zio = NULL;
1009 mutex_exit(&vd->vdev_probe_lock);
1010
1011 while ((pio = zio_walk_parents(zio)) != NULL)
1012 if (!vdev_accessible(vd, pio))
1013 pio->io_error = SET_ERROR(ENXIO);
1014
1015 kmem_free(vps, sizeof (*vps));
1016 }
1017 }
1018
1019 /*
1020 * Determine whether this device is accessible.
1021 *
1022 * Read and write to several known locations: the pad regions of each
1023 * vdev label but the first, which we leave alone in case it contains
1024 * a VTOC.
1025 */
1026 zio_t *
1027 vdev_probe(vdev_t *vd, zio_t *zio)
1028 {
1029 spa_t *spa = vd->vdev_spa;
1030 vdev_probe_stats_t *vps = NULL;
1031 zio_t *pio;
1032 int l;
1033
1034 ASSERT(vd->vdev_ops->vdev_op_leaf);
1035
1036 /*
1037 * Don't probe the probe.
1038 */
1039 if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
1040 return (NULL);
1041
1042 /*
1043 * To prevent 'probe storms' when a device fails, we create
1044 * just one probe i/o at a time. All zios that want to probe
1045 * this vdev will become parents of the probe io.
1046 */
1047 mutex_enter(&vd->vdev_probe_lock);
1048
1049 if ((pio = vd->vdev_probe_zio) == NULL) {
1050 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
1051
1052 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
1053 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
1054 ZIO_FLAG_TRYHARD;
1055
1056 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1057 /*
1058 * vdev_cant_read and vdev_cant_write can only
1059 * transition from TRUE to FALSE when we have the
1060 * SCL_ZIO lock as writer; otherwise they can only
1061 * transition from FALSE to TRUE. This ensures that
1062 * any zio looking at these values can assume that
1063 * failures persist for the life of the I/O. That's
1064 * important because when a device has intermittent
1065 * connectivity problems, we want to ensure that
1066 * they're ascribed to the device (ENXIO) and not
1067 * the zio (EIO).
1068 *
1069 * Since we hold SCL_ZIO as writer here, clear both
1070 * values so the probe can reevaluate from first
1071 * principles.
1072 */
1073 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
1074 vd->vdev_cant_read = B_FALSE;
1075 vd->vdev_cant_write = B_FALSE;
1076 }
1077
1078 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1079 vdev_probe_done, vps,
1080 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
1081
1082 /*
1083 * We can't change the vdev state in this context, so we
1084 * kick off an async task to do it on our behalf.
1085 */
1086 if (zio != NULL) {
1087 vd->vdev_probe_wanted = B_TRUE;
1088 spa_async_request(spa, SPA_ASYNC_PROBE);
1089 }
1090 }
1091
1092 if (zio != NULL)
1093 zio_add_child(zio, pio);
1094
1095 mutex_exit(&vd->vdev_probe_lock);
1096
1097 if (vps == NULL) {
1098 ASSERT(zio != NULL);
1099 return (NULL);
1100 }
1101
1102 for (l = 1; l < VDEV_LABELS; l++) {
1103 zio_nowait(zio_read_phys(pio, vd,
1104 vdev_label_offset(vd->vdev_psize, l,
1105 offsetof(vdev_label_t, vl_pad2)),
1106 VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
1107 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1108 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1109 }
1110
1111 if (zio == NULL)
1112 return (pio);
1113
1114 zio_nowait(pio);
1115 return (NULL);
1116 }
1117
1118 static void
1119 vdev_open_child(void *arg)
1120 {
1121 vdev_t *vd = arg;
1122
1123 vd->vdev_open_thread = curthread;
1124 vd->vdev_open_error = vdev_open(vd);
1125 vd->vdev_open_thread = NULL;
1126 vd->vdev_parent->vdev_nonrot &= vd->vdev_nonrot;
1127 }
1128
1129 static boolean_t
1130 vdev_uses_zvols(vdev_t *vd)
1131 {
1132 int c;
1133
1134 #ifdef _KERNEL
1135 if (zvol_is_zvol(vd->vdev_path))
1136 return (B_TRUE);
1137 #endif
1138
1139 for (c = 0; c < vd->vdev_children; c++)
1140 if (vdev_uses_zvols(vd->vdev_child[c]))
1141 return (B_TRUE);
1142
1143 return (B_FALSE);
1144 }
1145
1146 void
1147 vdev_open_children(vdev_t *vd)
1148 {
1149 taskq_t *tq;
1150 int children = vd->vdev_children;
1151 int c;
1152
1153 vd->vdev_nonrot = B_TRUE;
1154
1155 /*
1156 * In order to handle pools on top of zvols, do the opens
1157 * in a single thread so that the same thread holds the
1158 * spa_namespace_lock.
1159 */
1160 if (vdev_uses_zvols(vd)) {
1161 for (c = 0; c < children; c++) {
1162 vd->vdev_child[c]->vdev_open_error =
1163 vdev_open(vd->vdev_child[c]);
1164 vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
1165 }
1166 return;
1167 }
1168 tq = taskq_create("vdev_open", children, minclsyspri,
1169 children, children, TASKQ_PREPOPULATE);
1170
1171 for (c = 0; c < children; c++)
1172 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1173 TQ_SLEEP) != 0);
1174
1175 taskq_destroy(tq);
1176
1177 for (c = 0; c < children; c++)
1178 vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
1179 }
1180
1181 /*
1182 * Prepare a virtual device for access.
1183 */
1184 int
1185 vdev_open(vdev_t *vd)
1186 {
1187 spa_t *spa = vd->vdev_spa;
1188 int error;
1189 uint64_t osize = 0;
1190 uint64_t max_osize = 0;
1191 uint64_t asize, max_asize, psize;
1192 uint64_t ashift = 0;
1193 int c;
1194
1195 ASSERT(vd->vdev_open_thread == curthread ||
1196 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1197 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1198 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1199 vd->vdev_state == VDEV_STATE_OFFLINE);
1200
1201 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1202 vd->vdev_cant_read = B_FALSE;
1203 vd->vdev_cant_write = B_FALSE;
1204 vd->vdev_min_asize = vdev_get_min_asize(vd);
1205
1206 /*
1207 * If this vdev is not removed, check its fault status. If it's
1208 * faulted, bail out of the open.
1209 */
1210 if (!vd->vdev_removed && vd->vdev_faulted) {
1211 ASSERT(vd->vdev_children == 0);
1212 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1213 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1214 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1215 vd->vdev_label_aux);
1216 return (SET_ERROR(ENXIO));
1217 } else if (vd->vdev_offline) {
1218 ASSERT(vd->vdev_children == 0);
1219 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1220 return (SET_ERROR(ENXIO));
1221 }
1222
1223 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
1224
1225 /*
1226 * Reset the vdev_reopening flag so that we actually close
1227 * the vdev on error.
1228 */
1229 vd->vdev_reopening = B_FALSE;
1230 if (zio_injection_enabled && error == 0)
1231 error = zio_handle_device_injection(vd, NULL, ENXIO);
1232
1233 if (error) {
1234 if (vd->vdev_removed &&
1235 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1236 vd->vdev_removed = B_FALSE;
1237
1238 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1239 vd->vdev_stat.vs_aux);
1240 return (error);
1241 }
1242
1243 vd->vdev_removed = B_FALSE;
1244
1245 /*
1246 * Recheck the faulted flag now that we have confirmed that
1247 * the vdev is accessible. If we're faulted, bail.
1248 */
1249 if (vd->vdev_faulted) {
1250 ASSERT(vd->vdev_children == 0);
1251 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1252 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1253 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1254 vd->vdev_label_aux);
1255 return (SET_ERROR(ENXIO));
1256 }
1257
1258 if (vd->vdev_degraded) {
1259 ASSERT(vd->vdev_children == 0);
1260 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1261 VDEV_AUX_ERR_EXCEEDED);
1262 } else {
1263 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
1264 }
1265
1266 /*
1267 * For hole or missing vdevs we just return success.
1268 */
1269 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
1270 return (0);
1271
1272 for (c = 0; c < vd->vdev_children; c++) {
1273 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1274 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1275 VDEV_AUX_NONE);
1276 break;
1277 }
1278 }
1279
1280 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1281 max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
1282
1283 if (vd->vdev_children == 0) {
1284 if (osize < SPA_MINDEVSIZE) {
1285 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1286 VDEV_AUX_TOO_SMALL);
1287 return (SET_ERROR(EOVERFLOW));
1288 }
1289 psize = osize;
1290 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1291 max_asize = max_osize - (VDEV_LABEL_START_SIZE +
1292 VDEV_LABEL_END_SIZE);
1293 } else {
1294 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1295 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1296 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1297 VDEV_AUX_TOO_SMALL);
1298 return (SET_ERROR(EOVERFLOW));
1299 }
1300 psize = 0;
1301 asize = osize;
1302 max_asize = max_osize;
1303 }
1304
1305 vd->vdev_psize = psize;
1306
1307 /*
1308 * Make sure the allocatable size hasn't shrunk.
1309 */
1310 if (asize < vd->vdev_min_asize) {
1311 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1312 VDEV_AUX_BAD_LABEL);
1313 return (SET_ERROR(EINVAL));
1314 }
1315
1316 if (vd->vdev_asize == 0) {
1317 /*
1318 * This is the first-ever open, so use the computed values.
1319 * For compatibility, a different ashift can be requested.
1320 */
1321 vd->vdev_asize = asize;
1322 vd->vdev_max_asize = max_asize;
1323 if (vd->vdev_ashift == 0)
1324 vd->vdev_ashift = ashift;
1325 } else {
1326 /*
1327 * Detect if the alignment requirement has increased.
1328 * We don't want to make the pool unavailable, just
1329 * post an event instead.
1330 */
1331 if (ashift > vd->vdev_top->vdev_ashift &&
1332 vd->vdev_ops->vdev_op_leaf) {
1333 zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
1334 spa, vd, NULL, 0, 0);
1335 }
1336
1337 vd->vdev_max_asize = max_asize;
1338 }
1339
1340 /*
1341 * If all children are healthy and the asize has increased,
1342 * then we've experienced dynamic LUN growth. If automatic
1343 * expansion is enabled then use the additional space.
1344 */
1345 if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
1346 (vd->vdev_expanding || spa->spa_autoexpand))
1347 vd->vdev_asize = asize;
1348
1349 vdev_set_min_asize(vd);
1350
1351 /*
1352 * Ensure we can issue some IO before declaring the
1353 * vdev open for business.
1354 */
1355 if (vd->vdev_ops->vdev_op_leaf &&
1356 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1357 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1358 VDEV_AUX_ERR_EXCEEDED);
1359 return (error);
1360 }
1361
1362 /*
1363 * Track the min and max ashift values for normal data devices.
1364 */
1365 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1366 !vd->vdev_islog && vd->vdev_aux == NULL) {
1367 if (vd->vdev_ashift > spa->spa_max_ashift)
1368 spa->spa_max_ashift = vd->vdev_ashift;
1369 if (vd->vdev_ashift < spa->spa_min_ashift)
1370 spa->spa_min_ashift = vd->vdev_ashift;
1371 }
1372
1373 /*
1374 * If a leaf vdev has a DTL, and seems healthy, then kick off a
1375 * resilver. But don't do this if we are doing a reopen for a scrub,
1376 * since this would just restart the scrub we are already doing.
1377 */
1378 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
1379 vdev_resilver_needed(vd, NULL, NULL))
1380 spa_async_request(spa, SPA_ASYNC_RESILVER);
1381
1382 return (0);
1383 }
1384
1385 /*
1386 * Called once the vdevs are all opened, this routine validates the label
1387 * contents. This needs to be done before vdev_load() so that we don't
1388 * inadvertently do repair I/Os to the wrong device.
1389 *
1390 * If 'strict' is false, ignore the spa guid check. This is necessary because
1391 * if the machine crashed during a re-guid the new guid might have been written
1392 * to all of the vdev labels, but not the cached config. The strict check
1393 * will be performed when the pool is opened again using the mos config.
1394 *
1395 * This function will only return failure if one of the vdevs indicates that it
1396 * has since been destroyed or exported. This is only possible if
1397 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
1398 * will be updated but the function will return 0.
1399 */
1400 int
1401 vdev_validate(vdev_t *vd, boolean_t strict)
1402 {
1403 spa_t *spa = vd->vdev_spa;
1404 nvlist_t *label;
1405 uint64_t guid = 0, top_guid;
1406 uint64_t state;
1407 int c;
1408
1409 for (c = 0; c < vd->vdev_children; c++)
1410 if (vdev_validate(vd->vdev_child[c], strict) != 0)
1411 return (SET_ERROR(EBADF));
1412
1413 /*
1414 * If the device has already failed, or was marked offline, don't do
1415 * any further validation. Otherwise, label I/O will fail and we will
1416 * overwrite the previous state.
1417 */
1418 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1419 uint64_t aux_guid = 0;
1420 nvlist_t *nvl;
1421 uint64_t txg = spa_last_synced_txg(spa) != 0 ?
1422 spa_last_synced_txg(spa) : -1ULL;
1423
1424 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1425 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1426 VDEV_AUX_BAD_LABEL);
1427 return (0);
1428 }
1429
1430 /*
1431 * Determine if this vdev has been split off into another
1432 * pool. If so, then refuse to open it.
1433 */
1434 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
1435 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
1436 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1437 VDEV_AUX_SPLIT_POOL);
1438 nvlist_free(label);
1439 return (0);
1440 }
1441
1442 if (strict && (nvlist_lookup_uint64(label,
1443 ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
1444 guid != spa_guid(spa))) {
1445 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1446 VDEV_AUX_CORRUPT_DATA);
1447 nvlist_free(label);
1448 return (0);
1449 }
1450
1451 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
1452 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
1453 &aux_guid) != 0)
1454 aux_guid = 0;
1455
1456 /*
1457 * If this vdev just became a top-level vdev because its
1458 * sibling was detached, it will have adopted the parent's
1459 * vdev guid -- but the label may or may not be on disk yet.
1460 * Fortunately, either version of the label will have the
1461 * same top guid, so if we're a top-level vdev, we can
1462 * safely compare to that instead.
1463 *
1464 * If we split this vdev off instead, then we also check the
1465 * original pool's guid. We don't want to consider the vdev
1466 * corrupt if it is partway through a split operation.
1467 */
1468 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
1469 &guid) != 0 ||
1470 nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
1471 &top_guid) != 0 ||
1472 ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) &&
1473 (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
1474 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1475 VDEV_AUX_CORRUPT_DATA);
1476 nvlist_free(label);
1477 return (0);
1478 }
1479
1480 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1481 &state) != 0) {
1482 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1483 VDEV_AUX_CORRUPT_DATA);
1484 nvlist_free(label);
1485 return (0);
1486 }
1487
1488 nvlist_free(label);
1489
1490 /*
1491 * If this is a verbatim import, no need to check the
1492 * state of the pool.
1493 */
1494 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
1495 spa_load_state(spa) == SPA_LOAD_OPEN &&
1496 state != POOL_STATE_ACTIVE)
1497 return (SET_ERROR(EBADF));
1498
1499 /*
1500 * If we were able to open and validate a vdev that was
1501 * previously marked permanently unavailable, clear that state
1502 * now.
1503 */
1504 if (vd->vdev_not_present)
1505 vd->vdev_not_present = 0;
1506 }
1507
1508 return (0);
1509 }
1510
1511 /*
1512 * Close a virtual device.
1513 */
1514 void
1515 vdev_close(vdev_t *vd)
1516 {
1517 vdev_t *pvd = vd->vdev_parent;
1518 ASSERTV(spa_t *spa = vd->vdev_spa);
1519
1520 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1521
1522 /*
1523 * If our parent is reopening, then we are as well, unless we are
1524 * going offline.
1525 */
1526 if (pvd != NULL && pvd->vdev_reopening)
1527 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
1528
1529 vd->vdev_ops->vdev_op_close(vd);
1530
1531 vdev_cache_purge(vd);
1532
1533 /*
1534 * We record the previous state before we close it, so that if we are
1535 * doing a reopen(), we don't generate FMA ereports if we notice that
1536 * it's still faulted.
1537 */
1538 vd->vdev_prevstate = vd->vdev_state;
1539
1540 if (vd->vdev_offline)
1541 vd->vdev_state = VDEV_STATE_OFFLINE;
1542 else
1543 vd->vdev_state = VDEV_STATE_CLOSED;
1544 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1545 }
1546
1547 void
1548 vdev_hold(vdev_t *vd)
1549 {
1550 spa_t *spa = vd->vdev_spa;
1551 int c;
1552
1553 ASSERT(spa_is_root(spa));
1554 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1555 return;
1556
1557 for (c = 0; c < vd->vdev_children; c++)
1558 vdev_hold(vd->vdev_child[c]);
1559
1560 if (vd->vdev_ops->vdev_op_leaf)
1561 vd->vdev_ops->vdev_op_hold(vd);
1562 }
1563
1564 void
1565 vdev_rele(vdev_t *vd)
1566 {
1567 int c;
1568
1569 ASSERT(spa_is_root(vd->vdev_spa));
1570 for (c = 0; c < vd->vdev_children; c++)
1571 vdev_rele(vd->vdev_child[c]);
1572
1573 if (vd->vdev_ops->vdev_op_leaf)
1574 vd->vdev_ops->vdev_op_rele(vd);
1575 }
1576
1577 /*
1578 * Reopen all interior vdevs and any unopened leaves. We don't actually
1579 * reopen leaf vdevs which had previously been opened as they might deadlock
1580 * on the spa_config_lock. Instead we only obtain the leaf's physical size.
1581 * If the leaf has never been opened then open it, as usual.
1582 */
1583 void
1584 vdev_reopen(vdev_t *vd)
1585 {
1586 spa_t *spa = vd->vdev_spa;
1587
1588 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1589
1590 /* set the reopening flag unless we're taking the vdev offline */
1591 vd->vdev_reopening = !vd->vdev_offline;
1592 vdev_close(vd);
1593 (void) vdev_open(vd);
1594
1595 /*
1596 * Call vdev_validate() here to make sure we have the same device.
1597 * Otherwise, a device with an invalid label could be successfully
1598 * opened in response to vdev_reopen().
1599 */
1600 if (vd->vdev_aux) {
1601 (void) vdev_validate_aux(vd);
1602 if (vdev_readable(vd) && vdev_writeable(vd) &&
1603 vd->vdev_aux == &spa->spa_l2cache &&
1604 !l2arc_vdev_present(vd))
1605 l2arc_add_vdev(spa, vd);
1606 } else {
1607 (void) vdev_validate(vd, B_TRUE);
1608 }
1609
1610 /*
1611 * Reassess parent vdev's health.
1612 */
1613 vdev_propagate_state(vd);
1614 }
1615
1616 int
1617 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1618 {
1619 int error;
1620
1621 /*
1622 * Normally, partial opens (e.g. of a mirror) are allowed.
1623 * For a create, however, we want to fail the request if
1624 * there are any components we can't open.
1625 */
1626 error = vdev_open(vd);
1627
1628 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1629 vdev_close(vd);
1630 return (error ? error : ENXIO);
1631 }
1632
1633 /*
1634 * Recursively load DTLs and initialize all labels.
1635 */
1636 if ((error = vdev_dtl_load(vd)) != 0 ||
1637 (error = vdev_label_init(vd, txg, isreplacing ?
1638 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
1639 vdev_close(vd);
1640 return (error);
1641 }
1642
1643 return (0);
1644 }
1645
1646 void
1647 vdev_metaslab_set_size(vdev_t *vd)
1648 {
1649 /*
1650 * Aim for roughly metaslabs_per_vdev (default 200) metaslabs per vdev.
1651 */
1652 vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
1653 vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1654 }
1655
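/*
 * Worked example with a hypothetical 1 TiB top-level vdev: asize / 200 is
 * about 5.1 GiB, whose highest set bit gives an ms_shift of 33, i.e.
 * 8 GiB metaslabs and 1 TiB / 8 GiB = 128 of them -- "approximately (but
 * no more than)" 200.  The MAX() with SPA_MAXBLOCKSHIFT only kicks in for
 * very small vdevs, keeping each metaslab at least one maximum-sized
 * block long.
 */
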
1656 void
1657 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1658 {
1659 ASSERT(vd == vd->vdev_top);
1660 ASSERT(!vd->vdev_ishole);
1661 ASSERT(ISP2(flags));
1662 ASSERT(spa_writeable(vd->vdev_spa));
1663
1664 if (flags & VDD_METASLAB)
1665 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
1666
1667 if (flags & VDD_DTL)
1668 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
1669
1670 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1671 }
1672
1673 void
1674 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
1675 {
1676 int c;
1677
1678 for (c = 0; c < vd->vdev_children; c++)
1679 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
1680
1681 if (vd->vdev_ops->vdev_op_leaf)
1682 vdev_dirty(vd->vdev_top, flags, vd, txg);
1683 }
1684
1685 /*
1686 * DTLs.
1687 *
1688 * A vdev's DTL (dirty time log) is the set of transaction groups for which
1689 * the vdev has less than perfect replication. There are four kinds of DTL:
1690 *
1691 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
1692 *
1693 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
1694 *
1695 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
1696 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
1697 * txgs that was scrubbed.
1698 *
1699 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
1700 * persistent errors or just some device being offline.
1701 * Unlike the other three, the DTL_OUTAGE map is not generally
1702 * maintained; it's only computed when needed, typically to
1703 * determine whether a device can be detached.
1704 *
1705 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
1706 * either has the data or it doesn't.
1707 *
1708 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
1709 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
1710 * if any child is less than fully replicated, then so is its parent.
1711 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
1712 * comprising only those txgs which appear in more than 'maxfaults' children;
1713 * those are the txgs we don't have enough replication to read. For example,
1714 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
1715 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
1716 * two child DTL_MISSING maps.
1717 *
1718 * It should be clear from the above that to compute the DTLs and outage maps
1719 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
1720 * Therefore, that is all we keep on disk. When loading the pool, or after
1721 * a configuration change, we generate all other DTLs from first principles.
1722 */
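/*
 * Concrete illustration (the "offsets" in these range trees are txgs): in
 * a two-way mirror whose child A was unreachable for txgs 100-200, A's
 * DTL_MISSING covers that range.  The mirror's DTL_PARTIAL then covers it
 * as well (one copy is gone), but the mirror's DTL_MISSING stays empty as
 * long as child B still holds every one of those txgs.
 */
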
1723 void
1724 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1725 {
1726 range_tree_t *rt = vd->vdev_dtl[t];
1727
1728 ASSERT(t < DTL_TYPES);
1729 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1730 ASSERT(spa_writeable(vd->vdev_spa));
1731
1732 mutex_enter(rt->rt_lock);
1733 if (!range_tree_contains(rt, txg, size))
1734 range_tree_add(rt, txg, size);
1735 mutex_exit(rt->rt_lock);
1736 }
1737
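/*
 * Both 'txg' and 'size' above are expressed in transaction groups, so
 * vdev_dtl_dirty(vd, DTL_MISSING, txg, 1) marks exactly one txg as
 * missing, and a larger size marks a contiguous run of txgs.
 */
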
1738 boolean_t
1739 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1740 {
1741 range_tree_t *rt = vd->vdev_dtl[t];
1742 boolean_t dirty = B_FALSE;
1743
1744 ASSERT(t < DTL_TYPES);
1745 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1746
1747 mutex_enter(rt->rt_lock);
1748 if (range_tree_space(rt) != 0)
1749 dirty = range_tree_contains(rt, txg, size);
1750 mutex_exit(rt->rt_lock);
1751
1752 return (dirty);
1753 }
1754
1755 boolean_t
1756 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
1757 {
1758 range_tree_t *rt = vd->vdev_dtl[t];
1759 boolean_t empty;
1760
1761 mutex_enter(rt->rt_lock);
1762 empty = (range_tree_space(rt) == 0);
1763 mutex_exit(rt->rt_lock);
1764
1765 return (empty);
1766 }
1767
1768 /*
1769 * Returns the lowest txg in the DTL range.
1770 */
1771 static uint64_t
1772 vdev_dtl_min(vdev_t *vd)
1773 {
1774 range_seg_t *rs;
1775
1776 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
1777 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
1778 ASSERT0(vd->vdev_children);
1779
1780 rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
1781 return (rs->rs_start - 1);
1782 }
1783
1784 /*
1785 * Returns the highest txg in the DTL.
1786 */
1787 static uint64_t
1788 vdev_dtl_max(vdev_t *vd)
1789 {
1790 range_seg_t *rs;
1791
1792 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
1793 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
1794 ASSERT0(vd->vdev_children);
1795
1796 rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
1797 return (rs->rs_end);
1798 }
1799
1800 /*
1801 * Determine if a resilvering vdev should remove any DTL entries from
1802 * its range. If the vdev was resilvering for the entire duration of the
1803 * scan then it should excise that range from its DTLs. Otherwise, this
1804 * vdev is considered partially resilvered and should leave its DTL
1805 * entries intact. The comment in vdev_dtl_reassess() describes how we
1806 * excise the DTLs.
1807 */
1808 static boolean_t
1809 vdev_dtl_should_excise(vdev_t *vd)
1810 {
1811 spa_t *spa = vd->vdev_spa;
1812 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
1813
1814 ASSERT0(scn->scn_phys.scn_errors);
1815 ASSERT0(vd->vdev_children);
1816
1817 if (vd->vdev_resilver_txg == 0 ||
1818 range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
1819 return (B_TRUE);
1820
1821 /*
1822 * When a resilver is initiated the scan will assign the scn_max_txg
1823 * value to the highest txg value that exists in all DTLs. If this
1824 * device's max DTL is not part of this scan (i.e. it is not in
1825 * the range (scn_min_txg, scn_max_txg]) then it is not eligible
1826 * for excision.
1827 */
1828 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
1829 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
1830 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
1831 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
1832 return (B_TRUE);
1833 }
1834 return (B_FALSE);
1835 }
1836
1837 /*
1838 * Reassess DTLs after a config change or scrub completion.
1839 */
1840 void
1841 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1842 {
1843 spa_t *spa = vd->vdev_spa;
1844 avl_tree_t reftree;
1845 int c, t, minref;
1846
1847 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1848
1849 for (c = 0; c < vd->vdev_children; c++)
1850 vdev_dtl_reassess(vd->vdev_child[c], txg,
1851 scrub_txg, scrub_done);
1852
1853 if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
1854 return;
1855
1856 if (vd->vdev_ops->vdev_op_leaf) {
1857 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
1858
1859 mutex_enter(&vd->vdev_dtl_lock);
1860
1861 /*
1862 * If we've completed a scan cleanly then determine
1863 * if this vdev should remove any DTLs. We only want to
1864 * excise regions on vdevs that were available during
1865 * the entire duration of this scan.
1866 */
1867 if (scrub_txg != 0 &&
1868 (spa->spa_scrub_started ||
1869 (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
1870 vdev_dtl_should_excise(vd)) {
1871 /*
1872 * We completed a scrub up to scrub_txg. If we
1873 * did it without rebooting, then the scrub dtl
1874 * will be valid, so excise the old region and
1875 * fold in the scrub dtl. Otherwise, leave the
1876 * dtl as-is if there was an error.
1877 *
1878 * There's a little trick here: to excise the beginning
1879 * of the DTL_MISSING map, we put it into a reference
1880 * tree and then add a segment with refcnt -1 that
1881 * covers the range [0, scrub_txg). This means
1882 * that each txg in that range has refcnt -1 or 0.
1883 * We then add DTL_SCRUB with a refcnt of 2, so that
1884 * entries in the range [0, scrub_txg) will have a
1885 * positive refcnt -- either 1 or 2. We then convert
1886 * the reference tree into the new DTL_MISSING map.
1887 */
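/*
 * Small worked case (hypothetical txgs): with an old DTL_MISSING of
 * 40-120, scrub_txg = 100 and a DTL_SCRUB of 70-80 (txgs the scrub
 * could not repair), the reference tree ends up with refcnt 0 for
 * 40-69 and 81-99 (excised), 2 for 70-80 (kept) and 1 for 100-120
 * (kept, beyond the scrub), so the regenerated DTL_MISSING is 70-80
 * plus 100-120.
 */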
1888 space_reftree_create(&reftree);
1889 space_reftree_add_map(&reftree,
1890 vd->vdev_dtl[DTL_MISSING], 1);
1891 space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
1892 space_reftree_add_map(&reftree,
1893 vd->vdev_dtl[DTL_SCRUB], 2);
1894 space_reftree_generate_map(&reftree,
1895 vd->vdev_dtl[DTL_MISSING], 1);
1896 space_reftree_destroy(&reftree);
1897 }
1898 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
1899 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
1900 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
1901 if (scrub_done)
1902 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
1903 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
1904 if (!vdev_readable(vd))
1905 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
1906 else
1907 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
1908 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
1909
1910 /*
1911 * If the vdev was resilvering and no longer has any
1912 * DTLs then reset its resilvering flag.
1913 */
1914 if (vd->vdev_resilver_txg != 0 &&
1915 range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0 &&
1916 range_tree_space(vd->vdev_dtl[DTL_OUTAGE]) == 0)
1917 vd->vdev_resilver_txg = 0;
1918
1919 mutex_exit(&vd->vdev_dtl_lock);
1920
1921 if (txg != 0)
1922 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1923 return;
1924 }
1925
1926 mutex_enter(&vd->vdev_dtl_lock);
1927 for (t = 0; t < DTL_TYPES; t++) {
1928 int c;
1929
1930 /* account for child's outage in parent's missing map */
1931 int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
1932 if (t == DTL_SCRUB)
1933 continue; /* leaf vdevs only */
1934 if (t == DTL_PARTIAL)
1935 minref = 1; /* i.e. non-zero */
1936 else if (vd->vdev_nparity != 0)
1937 minref = vd->vdev_nparity + 1; /* RAID-Z */
1938 else
1939 minref = vd->vdev_children; /* any kind of mirror */
1940 space_reftree_create(&reftree);
1941 for (c = 0; c < vd->vdev_children; c++) {
1942 vdev_t *cvd = vd->vdev_child[c];
1943 mutex_enter(&cvd->vdev_dtl_lock);
1944 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
1945 mutex_exit(&cvd->vdev_dtl_lock);
1946 }
1947 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
1948 space_reftree_destroy(&reftree);
1949 }
1950 mutex_exit(&vd->vdev_dtl_lock);
1951 }
1952
1953 int
1954 vdev_dtl_load(vdev_t *vd)
1955 {
1956 spa_t *spa = vd->vdev_spa;
1957 objset_t *mos = spa->spa_meta_objset;
1958 int error = 0;
1959 int c;
1960
1961 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
1962 ASSERT(!vd->vdev_ishole);
1963
1964 error = space_map_open(&vd->vdev_dtl_sm, mos,
1965 vd->vdev_dtl_object, 0, -1ULL, 0, &vd->vdev_dtl_lock);
1966 if (error)
1967 return (error);
1968 ASSERT(vd->vdev_dtl_sm != NULL);
1969
1970 mutex_enter(&vd->vdev_dtl_lock);
1971
1972 /*
1973 * Now that we've opened the space_map we need to update
1974 * the in-core DTL.
1975 */
1976 space_map_update(vd->vdev_dtl_sm);
1977
1978 error = space_map_load(vd->vdev_dtl_sm,
1979 vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
1980 mutex_exit(&vd->vdev_dtl_lock);
1981
1982 return (error);
1983 }
1984
1985 for (c = 0; c < vd->vdev_children; c++) {
1986 error = vdev_dtl_load(vd->vdev_child[c]);
1987 if (error != 0)
1988 break;
1989 }
1990
1991 return (error);
1992 }
1993
1994 void
1995 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
1996 {
1997 spa_t *spa = vd->vdev_spa;
1998
1999 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
2000 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2001 zapobj, tx));
2002 }
2003
2004 uint64_t
2005 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
2006 {
2007 spa_t *spa = vd->vdev_spa;
2008 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
2009 DMU_OT_NONE, 0, tx);
2010
2011 ASSERT(zap != 0);
2012 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
2013 zap, tx));
2014
2015 return (zap);
2016 }
2017
2018 void
2019 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
2020 {
2021 uint64_t i;
2022
2023 if (vd->vdev_ops != &vdev_hole_ops &&
2024 vd->vdev_ops != &vdev_missing_ops &&
2025 vd->vdev_ops != &vdev_root_ops &&
2026 !vd->vdev_top->vdev_removing) {
2027 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
2028 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
2029 }
2030 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
2031 vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
2032 }
2033 }
2034 for (i = 0; i < vd->vdev_children; i++) {
2035 vdev_construct_zaps(vd->vdev_child[i], tx);
2036 }
2037 }
2038
2039 void
2040 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
2041 {
2042 spa_t *spa = vd->vdev_spa;
2043 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
2044 objset_t *mos = spa->spa_meta_objset;
2045 range_tree_t *rtsync;
2046 kmutex_t rtlock;
2047 dmu_tx_t *tx;
2048 uint64_t object = space_map_object(vd->vdev_dtl_sm);
2049
2050 ASSERT(!vd->vdev_ishole);
2051 ASSERT(vd->vdev_ops->vdev_op_leaf);
2052
2053 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2054
2055 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
2056 mutex_enter(&vd->vdev_dtl_lock);
2057 space_map_free(vd->vdev_dtl_sm, tx);
2058 space_map_close(vd->vdev_dtl_sm);
2059 vd->vdev_dtl_sm = NULL;
2060 mutex_exit(&vd->vdev_dtl_lock);
2061
2062 /*
2063 * We only destroy the leaf ZAP for detached leaves or for
2064 * removed log devices. Removed data devices handle leaf ZAP
2065 * cleanup later, once cancellation is no longer possible.
2066 */
2067 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
2068 vd->vdev_top->vdev_islog)) {
2069 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
2070 vd->vdev_leaf_zap = 0;
2071 }
2072
2073 dmu_tx_commit(tx);
2074 return;
2075 }
2076
2077 if (vd->vdev_dtl_sm == NULL) {
2078 uint64_t new_object;
2079
2080 new_object = space_map_alloc(mos, tx);
2081 VERIFY3U(new_object, !=, 0);
2082
2083 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
2084 0, -1ULL, 0, &vd->vdev_dtl_lock));
2085 ASSERT(vd->vdev_dtl_sm != NULL);
2086 }
2087
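	/*
	 * Snapshot the in-core DTL into a private tree under a private
	 * lock, so the space map can be rewritten below without holding
	 * vdev_dtl_lock for the duration of the write.
	 */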
2088 mutex_init(&rtlock, NULL, MUTEX_DEFAULT, NULL);
2089
2090 rtsync = range_tree_create(NULL, NULL, &rtlock);
2091
2092 mutex_enter(&rtlock);
2093
2094 mutex_enter(&vd->vdev_dtl_lock);
2095 range_tree_walk(rt, range_tree_add, rtsync);
2096 mutex_exit(&vd->vdev_dtl_lock);
2097
2098 space_map_truncate(vd->vdev_dtl_sm, tx);
2099 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx);
2100 range_tree_vacate(rtsync, NULL, NULL);
2101
2102 range_tree_destroy(rtsync);
2103
2104 mutex_exit(&rtlock);
2105 mutex_destroy(&rtlock);
2106
2107 /*
2108 * If the object for the space map has changed then dirty
2109 * the top level so that we update the config.
2110 */
2111 if (object != space_map_object(vd->vdev_dtl_sm)) {
2112 zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
2113 "new object %llu", txg, spa_name(spa), object,
2114 space_map_object(vd->vdev_dtl_sm));
2115 vdev_config_dirty(vd->vdev_top);
2116 }
2117
2118 dmu_tx_commit(tx);
2119
2120 mutex_enter(&vd->vdev_dtl_lock);
2121 space_map_update(vd->vdev_dtl_sm);
2122 mutex_exit(&vd->vdev_dtl_lock);
2123 }
2124
2125 /*
2126 * Determine whether the specified vdev can be offlined/detached/removed
2127 * without losing data.
2128 */
2129 boolean_t
2130 vdev_dtl_required(vdev_t *vd)
2131 {
2132 spa_t *spa = vd->vdev_spa;
2133 vdev_t *tvd = vd->vdev_top;
2134 uint8_t cant_read = vd->vdev_cant_read;
2135 boolean_t required;
2136
2137 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2138
2139 if (vd == spa->spa_root_vdev || vd == tvd)
2140 return (B_TRUE);
2141
2142 /*
2143 * Temporarily mark the device as unreadable, and then determine
2144 * whether this results in any DTL outages in the top-level vdev.
2145 * If not, we can safely offline/detach/remove the device.
2146 */
2147 vd->vdev_cant_read = B_TRUE;
2148 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2149 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
2150 vd->vdev_cant_read = cant_read;
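	/* Reassess again so the DTLs reflect the restored read state. */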
2151 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
2152
2153 if (!required && zio_injection_enabled)
2154 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
2155
2156 return (required);
2157 }
2158
2159 /*
2160 * Determine if resilver is needed, and if so the txg range.
2161 */
2162 boolean_t
2163 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2164 {
2165 boolean_t needed = B_FALSE;
2166 uint64_t thismin = UINT64_MAX;
2167 uint64_t thismax = 0;
2168 int c;
2169
2170 if (vd->vdev_children == 0) {
2171 mutex_enter(&vd->vdev_dtl_lock);
2172 if (range_tree_space(vd->vdev_dtl[DTL_MISSING]) != 0 &&
2173 vdev_writeable(vd)) {
2174
2175 thismin = vdev_dtl_min(vd);
2176 thismax = vdev_dtl_max(vd);
2177 needed = B_TRUE;
2178 }
2179 mutex_exit(&vd->vdev_dtl_lock);
2180 } else {
2181 for (c = 0; c < vd->vdev_children; c++) {
2182 vdev_t *cvd = vd->vdev_child[c];
2183 uint64_t cmin, cmax;
2184
2185 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
2186 thismin = MIN(thismin, cmin);
2187 thismax = MAX(thismax, cmax);
2188 needed = B_TRUE;
2189 }
2190 }
2191 }
2192
2193 if (needed && minp) {
2194 *minp = thismin;
2195 *maxp = thismax;
2196 }
2197 return (needed);
2198 }
2199
2200 void
2201 vdev_load(vdev_t *vd)
2202 {
2203 int c;
2204
2205 /*
2206 * Recursively load all children.
2207 */
2208 for (c = 0; c < vd->vdev_children; c++)
2209 vdev_load(vd->vdev_child[c]);
2210
2211 /*
2212 * If this is a top-level vdev, initialize its metaslabs.
2213 */
2214 if (vd == vd->vdev_top && !vd->vdev_ishole &&
2215 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
2216 vdev_metaslab_init(vd, 0) != 0))
2217 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2218 VDEV_AUX_CORRUPT_DATA);
2219
2220 /*
2221 * If this is a leaf vdev, load its DTL.
2222 */
2223 if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
2224 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2225 VDEV_AUX_CORRUPT_DATA);
2226 }
2227
2228 /*
2229 * The special vdev case is used for hot spares and l2cache devices. Its
2230 * sole purpose is to set the vdev state for the associated vdev. To do this,
2231 * we make sure that we can open the underlying device, then try to read the
2232 * label, and make sure that the label is sane and that it hasn't been
2233 * repurposed to another pool.
2234 */
2235 int
2236 vdev_validate_aux(vdev_t *vd)
2237 {
2238 nvlist_t *label;
2239 uint64_t guid, version;
2240 uint64_t state;
2241
2242 if (!vdev_readable(vd))
2243 return (0);
2244
2245 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
2246 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2247 VDEV_AUX_CORRUPT_DATA);
2248 return (-1);
2249 }
2250
2251 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
2252 !SPA_VERSION_IS_SUPPORTED(version) ||
2253 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
2254 guid != vd->vdev_guid ||
2255 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
2256 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2257 VDEV_AUX_CORRUPT_DATA);
2258 nvlist_free(label);
2259 return (-1);
2260 }
2261
2262 /*
2263 * We don't actually check the pool state here. If it's in fact in
2264 * use by another pool, we update this fact on the fly when requested.
2265 */
2266 nvlist_free(label);
2267 return (0);
2268 }
2269
2270 void
2271 vdev_remove(vdev_t *vd, uint64_t txg)
2272 {
2273 spa_t *spa = vd->vdev_spa;
2274 objset_t *mos = spa->spa_meta_objset;
2275 dmu_tx_t *tx;
2276 int m, i;
2277
2278 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2279 ASSERT(vd == vd->vdev_top);
2280 ASSERT3U(txg, ==, spa_syncing_txg(spa));
2281
2282 if (vd->vdev_ms != NULL) {
2283 metaslab_group_t *mg = vd->vdev_mg;
2284
2285 metaslab_group_histogram_verify(mg);
2286 metaslab_class_histogram_verify(mg->mg_class);
2287
2288 for (m = 0; m < vd->vdev_ms_count; m++) {
2289 metaslab_t *msp = vd->vdev_ms[m];
2290
2291 if (msp == NULL || msp->ms_sm == NULL)
2292 continue;
2293
2294 mutex_enter(&msp->ms_lock);
2295 /*
2296 * If the metaslab was not loaded when the vdev
2297 * was removed then the histogram accounting may
2298 * not be accurate. Update the histogram information
2299 * here to ensure that the metaslab group
2300 * and metaslab class are up-to-date.
2301 */
2302 metaslab_group_histogram_remove(mg, msp);
2303
2304 VERIFY0(space_map_allocated(msp->ms_sm));
2305 space_map_free(msp->ms_sm, tx);
2306 space_map_close(msp->ms_sm);
2307 msp->ms_sm = NULL;
2308 mutex_exit(&msp->ms_lock);
2309 }
2310
2311 metaslab_group_histogram_verify(mg);
2312 metaslab_class_histogram_verify(mg->mg_class);
2313 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
2314 ASSERT0(mg->mg_histogram[i]);
2315
2316 }
2317
2318 if (vd->vdev_ms_array) {
2319 (void) dmu_object_free(mos, vd->vdev_ms_array, tx);
2320 vd->vdev_ms_array = 0;
2321 }
2322
2323 if (vd->vdev_islog && vd->vdev_top_zap != 0) {
2324 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
2325 vd->vdev_top_zap = 0;
2326 }
2327 dmu_tx_commit(tx);
2328 }
2329
2330 void
2331 vdev_sync_done(vdev_t *vd, uint64_t txg)
2332 {
2333 metaslab_t *msp;
2334 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
2335
2336 ASSERT(!vd->vdev_ishole);
2337
2338 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
2339 metaslab_sync_done(msp, txg);
2340
2341 if (reassess)
2342 metaslab_sync_reassess(vd->vdev_mg);
2343 }
2344
2345 void
2346 vdev_sync(vdev_t *vd, uint64_t txg)
2347 {
2348 spa_t *spa = vd->vdev_spa;
2349 vdev_t *lvd;
2350 metaslab_t *msp;
2351 dmu_tx_t *tx;
2352
2353 ASSERT(!vd->vdev_ishole);
2354
2355 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
2356 ASSERT(vd == vd->vdev_top);
2357 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2358 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
2359 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
2360 ASSERT(vd->vdev_ms_array != 0);
2361 vdev_config_dirty(vd);
2362 dmu_tx_commit(tx);
2363 }
2364
2365 /*
2366 * Remove the metadata associated with this vdev once it's empty.
2367 */
2368 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
2369 vdev_remove(vd, txg);
2370
2371 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
2372 metaslab_sync(msp, txg);
2373 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
2374 }
2375
2376 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
2377 vdev_dtl_sync(lvd, txg);
2378
2379 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
2380 }
2381
2382 uint64_t
2383 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
2384 {
2385 return (vd->vdev_ops->vdev_op_asize(vd, psize));
2386 }
2387
2388 /*
2389 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
2390 * not be opened, and no I/O is attempted.
2391 */
2392 int
2393 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
2394 {
2395 vdev_t *vd, *tvd;
2396
2397 spa_vdev_state_enter(spa, SCL_NONE);
2398
2399 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2400 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2401
2402 if (!vd->vdev_ops->vdev_op_leaf)
2403 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2404
2405 tvd = vd->vdev_top;
2406
2407 /*
2408 * We don't directly use the aux state here, but if we do a
2409 * vdev_reopen(), we need this value to be present to remember why we
2410 * were faulted.
2411 */
2412 vd->vdev_label_aux = aux;
2413
2414 /*
2415 * Faulted state takes precedence over degraded.
2416 */
2417 vd->vdev_delayed_close = B_FALSE;
2418 vd->vdev_faulted = 1ULL;
2419 vd->vdev_degraded = 0ULL;
2420 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
2421
2422 /*
2423 * If this device has the only valid copy of the data, then
2424 * back off and simply mark the vdev as degraded instead.
2425 */
2426 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
2427 vd->vdev_degraded = 1ULL;
2428 vd->vdev_faulted = 0ULL;
2429
2430 /*
2431 * If we reopen the device and it's not dead, only then do we
2432 * mark it degraded.
2433 */
2434 vdev_reopen(tvd);
2435
2436 if (vdev_readable(vd))
2437 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
2438 }
2439
2440 return (spa_vdev_state_exit(spa, vd, 0));
2441 }
2442
2443 /*
2444 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
2445 * user that something is wrong. The vdev continues to operate as normal as far
2446 * as I/O is concerned.
2447 */
2448 int
2449 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
2450 {
2451 vdev_t *vd;
2452
2453 spa_vdev_state_enter(spa, SCL_NONE);
2454
2455 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2456 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2457
2458 if (!vd->vdev_ops->vdev_op_leaf)
2459 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2460
2461 /*
2462 * If the vdev is already faulted, then don't do anything.
2463 */
2464 if (vd->vdev_faulted || vd->vdev_degraded)
2465 return (spa_vdev_state_exit(spa, NULL, 0));
2466
2467 vd->vdev_degraded = 1ULL;
2468 if (!vdev_is_dead(vd))
2469 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
2470 aux);
2471
2472 return (spa_vdev_state_exit(spa, vd, 0));
2473 }
2474
2475 /*
2476 * Online the given vdev.
2477 *
2478 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
2479 * spare device should be detached when the device finishes resilvering.
2480 * Second, the online should be treated like a 'test' online case, so no FMA
2481 * events are generated if the device fails to open.
2482 */
2483 int
2484 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
2485 {
2486 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
2487
2488 spa_vdev_state_enter(spa, SCL_NONE);
2489
2490 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2491 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2492
2493 if (!vd->vdev_ops->vdev_op_leaf)
2494 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2495
2496 tvd = vd->vdev_top;
2497 vd->vdev_offline = B_FALSE;
2498 vd->vdev_tmpoffline = B_FALSE;
2499 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
2500 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
2501
2502 /* XXX - L2ARC 1.0 does not support expansion */
2503 if (!vd->vdev_aux) {
2504 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2505 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
2506 }
2507
2508 vdev_reopen(tvd);
2509 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
2510
2511 if (!vd->vdev_aux) {
2512 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2513 pvd->vdev_expanding = B_FALSE;
2514 }
2515
2516 if (newstate)
2517 *newstate = vd->vdev_state;
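	/*
	 * Under a spare vdev, child[0] is the original device; only the
	 * original is flagged here, so that the attached spare is
	 * detached once resilvering completes.
	 */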
2518 if ((flags & ZFS_ONLINE_UNSPARE) &&
2519 !vdev_is_dead(vd) && vd->vdev_parent &&
2520 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2521 vd->vdev_parent->vdev_child[0] == vd)
2522 vd->vdev_unspare = B_TRUE;
2523
2524 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
2525
2526 /* XXX - L2ARC 1.0 does not support expansion */
2527 if (vd->vdev_aux)
2528 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
2529 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2530 }
2531 return (spa_vdev_state_exit(spa, vd, 0));
2532 }
2533
2534 static int
2535 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
2536 {
2537 vdev_t *vd, *tvd;
2538 int error = 0;
2539 uint64_t generation;
2540 metaslab_group_t *mg;
2541
2542 top:
2543 spa_vdev_state_enter(spa, SCL_ALLOC);
2544
2545 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2546 return (spa_vdev_state_exit(spa, NULL, ENODEV));
2547
2548 if (!vd->vdev_ops->vdev_op_leaf)
2549 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2550
2551 tvd = vd->vdev_top;
2552 mg = tvd->vdev_mg;
2553 generation = spa->spa_config_generation + 1;
2554
2555 /*
2556 * If the device isn't already offline, try to offline it.
2557 */
2558 if (!vd->vdev_offline) {
2559 /*
2560 * If this device has the only valid copy of some data,
2561 * don't allow it to be offlined. Log devices are always
2562 * expendable.
2563 */
2564 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2565 vdev_dtl_required(vd))
2566 return (spa_vdev_state_exit(spa, NULL, EBUSY));
2567
2568 /*
2569 * If the top-level is a slog and it has had allocations
2570 * then proceed. We check that the vdev's metaslab group
2571 * is not NULL since it's possible that we may have just
2572 * added this vdev but not yet initialized its metaslabs.
2573 */
2574 if (tvd->vdev_islog && mg != NULL) {
2575 /*
2576 * Prevent any future allocations.
2577 */
2578 metaslab_group_passivate(mg);
2579 (void) spa_vdev_state_exit(spa, vd, 0);
2580
2581 error = spa_offline_log(spa);
2582
2583 spa_vdev_state_enter(spa, SCL_ALLOC);
2584
2585 /*
2586 * Check to see if the config has changed.
2587 */
2588 if (error || generation != spa->spa_config_generation) {
2589 metaslab_group_activate(mg);
2590 if (error)
2591 return (spa_vdev_state_exit(spa,
2592 vd, error));
2593 (void) spa_vdev_state_exit(spa, vd, 0);
2594 goto top;
2595 }
2596 ASSERT0(tvd->vdev_stat.vs_alloc);
2597 }
2598
2599 /*
2600 * Offline this device and reopen its top-level vdev.
2601 * If the top-level vdev is a log device then just offline
2602 * it. Otherwise, if this action results in the top-level
2603 * vdev becoming unusable, undo it and fail the request.
2604 */
2605 vd->vdev_offline = B_TRUE;
2606 vdev_reopen(tvd);
2607
2608 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2609 vdev_is_dead(tvd)) {
2610 vd->vdev_offline = B_FALSE;
2611 vdev_reopen(tvd);
2612 return (spa_vdev_state_exit(spa, NULL, EBUSY));
2613 }
2614
2615 /*
2616 * Add the device back into the metaslab rotor so that
2617 * once we online the device it's open for business.
2618 */
2619 if (tvd->vdev_islog && mg != NULL)
2620 metaslab_group_activate(mg);
2621 }
2622
2623 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
2624
2625 return (spa_vdev_state_exit(spa, vd, 0));
2626 }
2627
2628 int
2629 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
2630 {
2631 int error;
2632
2633 mutex_enter(&spa->spa_vdev_top_lock);
2634 error = vdev_offline_locked(spa, guid, flags);
2635 mutex_exit(&spa->spa_vdev_top_lock);
2636
2637 return (error);
2638 }
2639
2640 /*
2641 * Clear the error counts associated with this vdev. Unlike vdev_online() and
2642 * vdev_offline(), we assume the spa config is locked. We also clear all
2643 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
2644 */
2645 void
2646 vdev_clear(spa_t *spa, vdev_t *vd)
2647 {
2648 vdev_t *rvd = spa->spa_root_vdev;
2649 int c;
2650
2651 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2652
2653 if (vd == NULL)
2654 vd = rvd;
2655
2656 vd->vdev_stat.vs_read_errors = 0;
2657 vd->vdev_stat.vs_write_errors = 0;
2658 vd->vdev_stat.vs_checksum_errors = 0;
2659
2660 for (c = 0; c < vd->vdev_children; c++)
2661 vdev_clear(spa, vd->vdev_child[c]);
2662
2663 /*
2664 * If we're in the FAULTED state or have experienced failed I/O, then
2665 * clear the persistent state and attempt to reopen the device. We
2666 * also mark the vdev config dirty, so that the new faulted state is
2667 * written out to disk.
2668 */
2669 if (vd->vdev_faulted || vd->vdev_degraded ||
2670 !vdev_readable(vd) || !vdev_writeable(vd)) {
2671
2672 /*
2673 * When reopening in response to a clear event, it may be due to
2674 * a fmadm repair request. In this case, if the device is
2675 * still broken, we still want to post the ereport again.
2676 */
2677 vd->vdev_forcefault = B_TRUE;
2678
2679 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
2680 vd->vdev_cant_read = B_FALSE;
2681 vd->vdev_cant_write = B_FALSE;
2682
2683 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
2684
2685 vd->vdev_forcefault = B_FALSE;
2686
2687 if (vd != rvd && vdev_writeable(vd->vdev_top))
2688 vdev_state_dirty(vd->vdev_top);
2689
2690 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
2691 spa_async_request(spa, SPA_ASYNC_RESILVER);
2692
2693 spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_CLEAR);
2694 }
2695
2696 /*
2697 * When clearing a FMA-diagnosed fault, we always want to
2698 * unspare the device, as we assume that the device was originally
2699 * spared in response to the FMA fault.
2700 */
2701 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
2702 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2703 vd->vdev_parent->vdev_child[0] == vd)
2704 vd->vdev_unspare = B_TRUE;
2705 }
2706
2707 boolean_t
2708 vdev_is_dead(vdev_t *vd)
2709 {
2710 /*
2711 * Holes and missing devices are always considered "dead".
2712 * This simplifies the code since we don't have to check for
2713 * these types of devices in the various code paths.
2714 * Instead we rely on the fact that we skip over dead devices
2715 * before issuing I/O to them.
2716 */
2717 return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
2718 vd->vdev_ops == &vdev_missing_ops);
2719 }
2720
2721 boolean_t
2722 vdev_readable(vdev_t *vd)
2723 {
2724 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2725 }
2726
2727 boolean_t
2728 vdev_writeable(vdev_t *vd)
2729 {
2730 return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
2731 }
2732
2733 boolean_t
2734 vdev_allocatable(vdev_t *vd)
2735 {
2736 uint64_t state = vd->vdev_state;
2737
2738 /*
2739 * We currently allow allocations from vdevs which may be in the
2740 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
2741 * fails to reopen then we'll catch it later when we're holding
2742 * the proper locks. Note that we have to get the vdev state
2743 * in a local variable because although it changes atomically,
2744 * we're asking two separate questions about it.
2745 */
2746 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
2747 !vd->vdev_cant_write && !vd->vdev_ishole);
2748 }
2749
2750 boolean_t
2751 vdev_accessible(vdev_t *vd, zio_t *zio)
2752 {
2753 ASSERT(zio->io_vd == vd);
2754
2755 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
2756 return (B_FALSE);
2757
2758 if (zio->io_type == ZIO_TYPE_READ)
2759 return (!vd->vdev_cant_read);
2760
2761 if (zio->io_type == ZIO_TYPE_WRITE)
2762 return (!vd->vdev_cant_write);
2763
2764 return (B_TRUE);
2765 }
2766
2767 static void
2768 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
2769 {
2770 int t;
2771 for (t = 0; t < ZIO_TYPES; t++) {
2772 vs->vs_ops[t] += cvs->vs_ops[t];
2773 vs->vs_bytes[t] += cvs->vs_bytes[t];
2774 }
2775
2776 cvs->vs_scan_removing = cvd->vdev_removing;
2777 }
2778
2779 /*
2780 * Get extended stats
2781 */
2782 static void
2783 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
2784 {
2785 int t, b;
2786 for (t = 0; t < ZIO_TYPES; t++) {
2787 for (b = 0; b < VDEV_HISTO_BUCKETS; b++) {
2788 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
2789 vsx->vsx_total_histo[t][b] +=
2790 cvsx->vsx_total_histo[t][b];
2791 }
2792 }
2793
2794 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
2795 for (b = 0; b < VDEV_HISTO_BUCKETS; b++) {
2796 vsx->vsx_queue_histo[t][b] +=
2797 cvsx->vsx_queue_histo[t][b];
2798 }
2799 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
2800 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
2801 }
2802 }
2803
2804 /*
2805 * Get statistics for the given vdev.
2806 */
2807 static void
2808 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
2809 {
2810 int c, t;
2811 /*
2812 * If we're getting stats on the root vdev, aggregate the I/O counts
2813 * over all top-level vdevs (i.e. the direct children of the root).
2814 */
2815 if (!vd->vdev_ops->vdev_op_leaf) {
2816 if (vs) {
2817 memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
2818 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
2819 }
2820 if (vsx)
2821 memset(vsx, 0, sizeof (*vsx));
2822
2823 for (c = 0; c < vd->vdev_children; c++) {
2824 vdev_t *cvd = vd->vdev_child[c];
2825 vdev_stat_t *cvs = &cvd->vdev_stat;
2826 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
2827
2828 vdev_get_stats_ex_impl(cvd, cvs, cvsx);
2829 if (vs)
2830 vdev_get_child_stat(cvd, vs, cvs);
2831 if (vsx)
2832 vdev_get_child_stat_ex(cvd, vsx, cvsx);
2833
2834 }
2835 } else {
2836 /*
2837 * We're a leaf. Just copy our ZIO active queue stats in. The
2838 * other leaf stats are updated in vdev_stat_update().
2839 */
2840 if (!vsx)
2841 return;
2842
2843 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
2844
2845 for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
2846 vsx->vsx_active_queue[t] =
2847 vd->vdev_queue.vq_class[t].vqc_active;
2848 vsx->vsx_pend_queue[t] = avl_numnodes(
2849 &vd->vdev_queue.vq_class[t].vqc_queued_tree);
2850 }
2851 }
2852 }
2853
2854 void
2855 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
2856 {
2857 mutex_enter(&vd->vdev_stat_lock);
2858 if (vs) {
2859 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
2860 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2861 vs->vs_state = vd->vdev_state;
2862 vs->vs_rsize = vdev_get_min_asize(vd);
2863 if (vd->vdev_ops->vdev_op_leaf)
2864 vs->vs_rsize += VDEV_LABEL_START_SIZE +
2865 VDEV_LABEL_END_SIZE;
2866 vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
2867 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
2868 !vd->vdev_ishole) {
2869 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
2870 }
2871 }
2872
2873 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_READER) != 0);
2874 vdev_get_stats_ex_impl(vd, vs, vsx);
2875 mutex_exit(&vd->vdev_stat_lock);
2876 }
2877
2878 void
2879 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2880 {
2881 return (vdev_get_stats_ex(vd, vs, NULL));
2882 }
2883
2884 void
2885 vdev_clear_stats(vdev_t *vd)
2886 {
2887 mutex_enter(&vd->vdev_stat_lock);
2888 vd->vdev_stat.vs_space = 0;
2889 vd->vdev_stat.vs_dspace = 0;
2890 vd->vdev_stat.vs_alloc = 0;
2891 mutex_exit(&vd->vdev_stat_lock);
2892 }
2893
2894 void
2895 vdev_scan_stat_init(vdev_t *vd)
2896 {
2897 vdev_stat_t *vs = &vd->vdev_stat;
2898 int c;
2899
2900 for (c = 0; c < vd->vdev_children; c++)
2901 vdev_scan_stat_init(vd->vdev_child[c]);
2902
2903 mutex_enter(&vd->vdev_stat_lock);
2904 vs->vs_scan_processed = 0;
2905 mutex_exit(&vd->vdev_stat_lock);
2906 }
2907
2908 void
2909 vdev_stat_update(zio_t *zio, uint64_t psize)
2910 {
2911 spa_t *spa = zio->io_spa;
2912 vdev_t *rvd = spa->spa_root_vdev;
2913 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2914 vdev_t *pvd;
2915 uint64_t txg = zio->io_txg;
2916 vdev_stat_t *vs = &vd->vdev_stat;
2917 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
2918 zio_type_t type = zio->io_type;
2919 int flags = zio->io_flags;
2920
2921 /*
2922 * If this i/o is a gang leader, it didn't do any actual work.
2923 */
2924 if (zio->io_gang_tree)
2925 return;
2926
2927 if (zio->io_error == 0) {
2928 /*
2929 * If this is a root i/o, don't count it -- we've already
2930 * counted the top-level vdevs, and vdev_get_stats() will
2931 * aggregate them when asked. This reduces contention on
2932 * the root vdev_stat_lock and implicitly handles blocks
2933 * that compress away to holes, for which there is no i/o.
2934 * (Holes never create vdev children, so all the counters
2935 * remain zero, which is what we want.)
2936 *
2937 * Note: this only applies to successful i/o (io_error == 0)
2938 * because unlike i/o counts, errors are not additive.
2939 * When reading a ditto block, for example, failure of
2940 * one top-level vdev does not imply a root-level error.
2941 */
2942 if (vd == rvd)
2943 return;
2944
2945 ASSERT(vd == zio->io_vd);
2946
2947 if (flags & ZIO_FLAG_IO_BYPASS)
2948 return;
2949
2950 mutex_enter(&vd->vdev_stat_lock);
2951
2952 if (flags & ZIO_FLAG_IO_REPAIR) {
2953 if (flags & ZIO_FLAG_SCAN_THREAD) {
2954 dsl_scan_phys_t *scn_phys =
2955 &spa->spa_dsl_pool->dp_scan->scn_phys;
2956 uint64_t *processed = &scn_phys->scn_processed;
2957
2958 /* XXX cleanup? */
2959 if (vd->vdev_ops->vdev_op_leaf)
2960 atomic_add_64(processed, psize);
2961 vs->vs_scan_processed += psize;
2962 }
2963
2964 if (flags & ZIO_FLAG_SELF_HEAL)
2965 vs->vs_self_healed += psize;
2966 }
2967
2968 /*
2969 * The bytes/ops/histograms are recorded at the leaf level and
2970 * aggregated into the higher level vdevs in vdev_get_stats().
2971 */
2972 if (vd->vdev_ops->vdev_op_leaf) {
2973
2974 vs->vs_ops[type]++;
2975 vs->vs_bytes[type] += psize;
2976
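			/*
			 * Bucket the latencies: io_delay tracks time spent
			 * at the device and io_delta the total zio latency,
			 * so their difference approximates time spent queued.
			 */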
2977 if (zio->io_delta && zio->io_delay) {
2978 vsx->vsx_queue_histo[zio->io_priority]
2979 [HISTO(zio->io_delta - zio->io_delay)]++;
2980 vsx->vsx_disk_histo[type]
2981 [HISTO(zio->io_delay)]++;
2982 vsx->vsx_total_histo[type]
2983 [HISTO(zio->io_delta)]++;
2984 }
2985 }
2986
2987 mutex_exit(&vd->vdev_stat_lock);
2988 return;
2989 }
2990
2991 if (flags & ZIO_FLAG_SPECULATIVE)
2992 return;
2993
2994 /*
2995 * If this is an I/O error that is going to be retried, then ignore the
2996 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
2997 * hard errors, when in reality they can happen for any number of
2998 * innocuous reasons (bus resets, MPxIO link failure, etc).
2999 */
3000 if (zio->io_error == EIO &&
3001 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
3002 return;
3003
3004 /*
3005 * Intent log writes won't propagate their error to the root
3006 * I/O so don't mark these types of failures as pool-level
3007 * errors.
3008 */
3009 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
3010 return;
3011
3012 mutex_enter(&vd->vdev_stat_lock);
3013 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
3014 if (zio->io_error == ECKSUM)
3015 vs->vs_checksum_errors++;
3016 else
3017 vs->vs_read_errors++;
3018 }
3019 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
3020 vs->vs_write_errors++;
3021 mutex_exit(&vd->vdev_stat_lock);
3022
3023 if (type == ZIO_TYPE_WRITE && txg != 0 &&
3024 (!(flags & ZIO_FLAG_IO_REPAIR) ||
3025 (flags & ZIO_FLAG_SCAN_THREAD) ||
3026 spa->spa_claiming)) {
3027 /*
3028 * This is either a normal write (not a repair), or it's
3029 * a repair induced by the scrub thread, or it's a repair
3030 * made by zil_claim() during spa_load() in the first txg.
3031 * In the normal case, we commit the DTL change in the same
3032 * txg as the block was born. In the scrub-induced repair
3033 * case, we know that scrubs run in first-pass syncing context,
3034 * so we commit the DTL change in spa_syncing_txg(spa).
3035 * In the zil_claim() case, we commit in spa_first_txg(spa).
3036 *
3037 * We currently do not make DTL entries for failed spontaneous
3038 * self-healing writes triggered by normal (non-scrubbing)
3039 * reads, because we have no transactional context in which to
3040 * do so -- and it's not clear that it'd be desirable anyway.
3041 */
3042 if (vd->vdev_ops->vdev_op_leaf) {
3043 uint64_t commit_txg = txg;
3044 if (flags & ZIO_FLAG_SCAN_THREAD) {
3045 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3046 ASSERT(spa_sync_pass(spa) == 1);
3047 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
3048 commit_txg = spa_syncing_txg(spa);
3049 } else if (spa->spa_claiming) {
3050 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3051 commit_txg = spa_first_txg(spa);
3052 }
3053 ASSERT(commit_txg >= spa_syncing_txg(spa));
3054 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
3055 return;
3056 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3057 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
3058 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
3059 }
3060 if (vd != rvd)
3061 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
3062 }
3063 }
3064
3065 /*
3066 * Update the in-core space usage stats for this vdev, its metaslab class,
3067 * and the root vdev.
3068 */
3069 void
3070 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
3071 int64_t space_delta)
3072 {
3073 int64_t dspace_delta = space_delta;
3074 spa_t *spa = vd->vdev_spa;
3075 vdev_t *rvd = spa->spa_root_vdev;
3076 metaslab_group_t *mg = vd->vdev_mg;
3077 metaslab_class_t *mc = mg ? mg->mg_class : NULL;
3078
3079 ASSERT(vd == vd->vdev_top);
3080
3081 /*
3082 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
3083 * factor. We must calculate this here and not at the root vdev
3084 * because the root vdev's psize-to-asize is simply the max of its
3085 * children's, thus not accurate enough for us.
3086 */
3087 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
3088 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
3089 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
3090 vd->vdev_deflate_ratio;
3091
3092 mutex_enter(&vd->vdev_stat_lock);
3093 vd->vdev_stat.vs_alloc += alloc_delta;
3094 vd->vdev_stat.vs_space += space_delta;
3095 vd->vdev_stat.vs_dspace += dspace_delta;
3096 mutex_exit(&vd->vdev_stat_lock);
3097
3098 if (mc == spa_normal_class(spa)) {
3099 mutex_enter(&rvd->vdev_stat_lock);
3100 rvd->vdev_stat.vs_alloc += alloc_delta;
3101 rvd->vdev_stat.vs_space += space_delta;
3102 rvd->vdev_stat.vs_dspace += dspace_delta;
3103 mutex_exit(&rvd->vdev_stat_lock);
3104 }
3105
3106 if (mc != NULL) {
3107 ASSERT(rvd == vd->vdev_parent);
3108 ASSERT(vd->vdev_ms_count != 0);
3109
3110 metaslab_class_space_update(mc,
3111 alloc_delta, defer_delta, space_delta, dspace_delta);
3112 }
3113 }
3114
3115 /*
3116 * Mark a top-level vdev's config as dirty, placing it on the dirty list
3117 * so that it will be written out next time the vdev configuration is synced.
3118 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
3119 */
3120 void
3121 vdev_config_dirty(vdev_t *vd)
3122 {
3123 spa_t *spa = vd->vdev_spa;
3124 vdev_t *rvd = spa->spa_root_vdev;
3125 int c;
3126
3127 ASSERT(spa_writeable(spa));
3128
3129 /*
3130 * If this is an aux vdev (as with l2cache and spare devices), then we
3131 * update the vdev config manually and set the sync flag.
3132 */
3133 if (vd->vdev_aux != NULL) {
3134 spa_aux_vdev_t *sav = vd->vdev_aux;
3135 nvlist_t **aux;
3136 uint_t naux;
3137
3138 for (c = 0; c < sav->sav_count; c++) {
3139 if (sav->sav_vdevs[c] == vd)
3140 break;
3141 }
3142
3143 if (c == sav->sav_count) {
3144 /*
3145 * We're being removed. There's nothing more to do.
3146 */
3147 ASSERT(sav->sav_sync == B_TRUE);
3148 return;
3149 }
3150
3151 sav->sav_sync = B_TRUE;
3152
3153 if (nvlist_lookup_nvlist_array(sav->sav_config,
3154 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
3155 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
3156 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
3157 }
3158
3159 ASSERT(c < naux);
3160
3161 /*
3162 * Setting the nvlist in the middle of the array is a little
3163 * sketchy, but it will work.
3164 */
3165 nvlist_free(aux[c]);
3166 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
3167
3168 return;
3169 }
3170
3171 /*
3172 * The dirty list is protected by the SCL_CONFIG lock. The caller
3173 * must either hold SCL_CONFIG as writer, or must be the sync thread
3174 * (which holds SCL_CONFIG as reader). There's only one sync thread,
3175 * so this is sufficient to ensure mutual exclusion.
3176 */
3177 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3178 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3179 spa_config_held(spa, SCL_CONFIG, RW_READER)));
3180
3181 if (vd == rvd) {
3182 for (c = 0; c < rvd->vdev_children; c++)
3183 vdev_config_dirty(rvd->vdev_child[c]);
3184 } else {
3185 ASSERT(vd == vd->vdev_top);
3186
3187 if (!list_link_active(&vd->vdev_config_dirty_node) &&
3188 !vd->vdev_ishole)
3189 list_insert_head(&spa->spa_config_dirty_list, vd);
3190 }
3191 }
3192
3193 void
3194 vdev_config_clean(vdev_t *vd)
3195 {
3196 spa_t *spa = vd->vdev_spa;
3197
3198 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3199 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3200 spa_config_held(spa, SCL_CONFIG, RW_READER)));
3201
3202 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
3203 list_remove(&spa->spa_config_dirty_list, vd);
3204 }
3205
3206 /*
3207 * Mark a top-level vdev's state as dirty, so that the next pass of
3208 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
3209 * the state changes from larger config changes because they require
3210 * much less locking, and are often needed for administrative actions.
3211 */
3212 void
3213 vdev_state_dirty(vdev_t *vd)
3214 {
3215 spa_t *spa = vd->vdev_spa;
3216
3217 ASSERT(spa_writeable(spa));
3218 ASSERT(vd == vd->vdev_top);
3219
3220 /*
3221 * The state list is protected by the SCL_STATE lock. The caller
3222 * must either hold SCL_STATE as writer, or must be the sync thread
3223 * (which holds SCL_STATE as reader). There's only one sync thread,
3224 * so this is sufficient to ensure mutual exclusion.
3225 */
3226 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
3227 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3228 spa_config_held(spa, SCL_STATE, RW_READER)));
3229
3230 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
3231 list_insert_head(&spa->spa_state_dirty_list, vd);
3232 }
3233
3234 void
3235 vdev_state_clean(vdev_t *vd)
3236 {
3237 spa_t *spa = vd->vdev_spa;
3238
3239 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
3240 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3241 spa_config_held(spa, SCL_STATE, RW_READER)));
3242
3243 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
3244 list_remove(&spa->spa_state_dirty_list, vd);
3245 }
3246
3247 /*
3248 * Propagate vdev state up from children to parent.
3249 */
3250 void
3251 vdev_propagate_state(vdev_t *vd)
3252 {
3253 spa_t *spa = vd->vdev_spa;
3254 vdev_t *rvd = spa->spa_root_vdev;
3255 int degraded = 0, faulted = 0;
3256 int corrupted = 0;
3257 vdev_t *child;
3258 int c;
3259
3260 if (vd->vdev_children > 0) {
3261 for (c = 0; c < vd->vdev_children; c++) {
3262 child = vd->vdev_child[c];
3263
3264 /*
3265 * Don't factor holes into the decision.
3266 */
3267 if (child->vdev_ishole)
3268 continue;
3269
3270 if (!vdev_readable(child) ||
3271 (!vdev_writeable(child) && spa_writeable(spa))) {
3272 /*
3273 * Root special: if there is a top-level log
3274 * device, treat the root vdev as if it were
3275 * degraded.
3276 */
3277 if (child->vdev_islog && vd == rvd)
3278 degraded++;
3279 else
3280 faulted++;
3281 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
3282 degraded++;
3283 }
3284
3285 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
3286 corrupted++;
3287 }
3288
3289 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
3290
3291 /*
3292 * Root special: if there is a top-level vdev that cannot be
3293 * opened due to corrupted metadata, then propagate the root
3294 * vdev's aux state as 'corrupt' rather than 'insufficient
3295 * replicas'.
3296 */
3297 if (corrupted && vd == rvd &&
3298 rvd->vdev_state == VDEV_STATE_CANT_OPEN)
3299 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
3300 VDEV_AUX_CORRUPT_DATA);
3301 }
3302
3303 if (vd->vdev_parent)
3304 vdev_propagate_state(vd->vdev_parent);
3305 }
3306
3307 /*
3308 * Set a vdev's state. If this is during an open, we don't update the parent
3309 * state, because we're in the process of opening children depth-first.
3310 * Otherwise, we propagate the change to the parent.
3311 *
3312 * If this routine places a device in a faulted state, an appropriate ereport is
3313 * generated.
3314 */
3315 void
3316 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
3317 {
3318 uint64_t save_state;
3319 spa_t *spa = vd->vdev_spa;
3320
3321 if (state == vd->vdev_state) {
3322 vd->vdev_stat.vs_aux = aux;
3323 return;
3324 }
3325
3326 save_state = vd->vdev_state;
3327
3328 vd->vdev_state = state;
3329 vd->vdev_stat.vs_aux = aux;
3330
3331 /*
3332 * If we are setting the vdev state to anything but an open state, then
3333 * always close the underlying device unless the device has requested
3334 * a delayed close (i.e. we're about to remove or fault the device).
3335 * Otherwise, we keep accessible but invalid devices open forever.
3336 * We don't call vdev_close() itself, because that implies some extra
3337 * checks (offline, etc) that we don't want here. This is limited to
3338 * leaf devices, because otherwise closing the device will affect other
3339 * children.
3340 */
3341 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
3342 vd->vdev_ops->vdev_op_leaf)
3343 vd->vdev_ops->vdev_op_close(vd);
3344
3345 /*
3346 * If we have brought this vdev back into service, we need
3347 * to notify fmd so that it can gracefully repair any outstanding
3348 * cases due to a missing device. We do this in all cases, even those
3349 * that probably don't correlate to a repaired fault. This is sure to
3350 * catch all cases, and we let the zfs-retire agent sort it out. If
3351 * this is a transient state it's OK, as the retire agent will
3352 * double-check the state of the vdev before repairing it.
3353 */
3354 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
3355 vd->vdev_prevstate != state)
3356 zfs_post_state_change(spa, vd);
3357
3358 if (vd->vdev_removed &&
3359 state == VDEV_STATE_CANT_OPEN &&
3360 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
3361 /*
3362 * If the previous state is set to VDEV_STATE_REMOVED, then this
3363 * device was previously marked removed and someone attempted to
3364 * reopen it. If this failed due to a nonexistent device, then
3365 * keep the device in the REMOVED state. We also allow this if
3366 * it is one of our special test online cases, which is only
3367 * attempting to online the device and shouldn't generate an FMA
3368 * fault.
3369 */
3370 vd->vdev_state = VDEV_STATE_REMOVED;
3371 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
3372 } else if (state == VDEV_STATE_REMOVED) {
3373 vd->vdev_removed = B_TRUE;
3374 } else if (state == VDEV_STATE_CANT_OPEN) {
3375 /*
3376 * If we fail to open a vdev during an import or recovery, we
3377 * mark it as "not available", which signifies that it was
3378 * never there to begin with. Failure to open such a device
3379 * is not considered an error.
3380 */
3381 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
3382 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
3383 vd->vdev_ops->vdev_op_leaf)
3384 vd->vdev_not_present = 1;
3385
3386 /*
3387 * Post the appropriate ereport. If the 'prevstate' field is
3388 * set to something other than VDEV_STATE_UNKNOWN, it indicates
3389 * that this is part of a vdev_reopen(). In this case, we don't
3390 * want to post the ereport if the device was already in the
3391 * CANT_OPEN state beforehand.
3392 *
3393 * If the 'checkremove' flag is set, then this is an attempt to
3394 * online the device in response to an insertion event. If we
3395 * hit this case, then we have detected an insertion event for a
3396 * faulted or offline device that wasn't in the removed state.
3397 * In this scenario, we don't post an ereport because we are
3398 * about to replace the device, or attempt an online with
3399 * vdev_forcefault, which will generate the fault for us.
3400 */
3401 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
3402 !vd->vdev_not_present && !vd->vdev_checkremove &&
3403 vd != spa->spa_root_vdev) {
3404 const char *class;
3405
3406 switch (aux) {
3407 case VDEV_AUX_OPEN_FAILED:
3408 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
3409 break;
3410 case VDEV_AUX_CORRUPT_DATA:
3411 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
3412 break;
3413 case VDEV_AUX_NO_REPLICAS:
3414 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
3415 break;
3416 case VDEV_AUX_BAD_GUID_SUM:
3417 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
3418 break;
3419 case VDEV_AUX_TOO_SMALL:
3420 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
3421 break;
3422 case VDEV_AUX_BAD_LABEL:
3423 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
3424 break;
3425 default:
3426 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
3427 }
3428
3429 zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
3430 }
3431
3432 /* Erase any notion of persistent removed state */
3433 vd->vdev_removed = B_FALSE;
3434 } else {
3435 vd->vdev_removed = B_FALSE;
3436 }
3437
3438 if (!isopen && vd->vdev_parent)
3439 vdev_propagate_state(vd->vdev_parent);
3440 }
3441
3442 /*
3443 * Check the vdev configuration to ensure that it's capable of supporting
3444 * a root pool.
3445 */
3446 boolean_t
3447 vdev_is_bootable(vdev_t *vd)
3448 {
3449 #if defined(__sun__) || defined(__sun)
3450 /*
3451 * Currently, we do not support RAID-Z or partial configuration.
3452 * In addition, only a single top-level vdev is allowed and none of the
3453 * leaves can be wholedisks.
3454 */
3455 int c;
3456
3457 if (!vd->vdev_ops->vdev_op_leaf) {
3458 char *vdev_type = vd->vdev_ops->vdev_op_type;
3459
3460 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
3461 vd->vdev_children > 1) {
3462 return (B_FALSE);
3463 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
3464 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
3465 return (B_FALSE);
3466 }
3467 } else if (vd->vdev_wholedisk == 1) {
3468 return (B_FALSE);
3469 }
3470
3471 for (c = 0; c < vd->vdev_children; c++) {
3472 if (!vdev_is_bootable(vd->vdev_child[c]))
3473 return (B_FALSE);
3474 }
3475 #endif /* __sun__ || __sun */
3476 return (B_TRUE);
3477 }
3478
3479 /*
3480 * Load the state from the original vdev tree (ovd) which
3481 * we've retrieved from the MOS config object. If the original
3482 * vdev was offline or faulted then we transfer that state to the
3483 * device in the current vdev tree (nvd).
3484 */
3485 void
3486 vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
3487 {
3488 int c;
3489
3490 ASSERT(nvd->vdev_top->vdev_islog);
3491 ASSERT(spa_config_held(nvd->vdev_spa,
3492 SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3493 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
3494
3495 for (c = 0; c < nvd->vdev_children; c++)
3496 vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
3497
3498 if (nvd->vdev_ops->vdev_op_leaf) {
3499 /*
3500 * Restore the persistent vdev state
3501 */
3502 nvd->vdev_offline = ovd->vdev_offline;
3503 nvd->vdev_faulted = ovd->vdev_faulted;
3504 nvd->vdev_degraded = ovd->vdev_degraded;
3505 nvd->vdev_removed = ovd->vdev_removed;
3506 }
3507 }
3508
3509 /*
3510 * Determine if a log device has valid content. If the vdev was
3511 * removed or faulted in the MOS config then we know that
3512 * the content on the log device has already been written to the pool.
3513 */
3514 boolean_t
3515 vdev_log_state_valid(vdev_t *vd)
3516 {
3517 int c;
3518
3519 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
3520 !vd->vdev_removed)
3521 return (B_TRUE);
3522
3523 for (c = 0; c < vd->vdev_children; c++)
3524 if (vdev_log_state_valid(vd->vdev_child[c]))
3525 return (B_TRUE);
3526
3527 return (B_FALSE);
3528 }
3529
3530 /*
3531 * Expand a vdev if possible.
3532 */
3533 void
3534 vdev_expand(vdev_t *vd, uint64_t txg)
3535 {
3536 ASSERT(vd->vdev_top == vd);
3537 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3538
3539 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
3540 VERIFY(vdev_metaslab_init(vd, txg) == 0);
3541 vdev_config_dirty(vd);
3542 }
3543 }
3544
3545 /*
3546 * Split a vdev.
3547 */
3548 void
3549 vdev_split(vdev_t *vd)
3550 {
3551 vdev_t *cvd, *pvd = vd->vdev_parent;
3552
3553 vdev_remove_child(pvd, vd);
3554 vdev_compact_children(pvd);
3555
3556 cvd = pvd->vdev_child[0];
3557 if (pvd->vdev_children == 1) {
3558 vdev_remove_parent(cvd);
3559 cvd->vdev_splitting = B_TRUE;
3560 }
3561 vdev_propagate_state(cvd);
3562 }
3563
3564 void
3565 vdev_deadman(vdev_t *vd)
3566 {
3567 int c;
3568
3569 for (c = 0; c < vd->vdev_children; c++) {
3570 vdev_t *cvd = vd->vdev_child[c];
3571
3572 vdev_deadman(cvd);
3573 }
3574
3575 if (vd->vdev_ops->vdev_op_leaf) {
3576 vdev_queue_t *vq = &vd->vdev_queue;
3577
3578 mutex_enter(&vq->vq_lock);
3579 if (avl_numnodes(&vq->vq_active_tree) > 0) {
3580 spa_t *spa = vd->vdev_spa;
3581 zio_t *fio;
3582 uint64_t delta;
3583
3584 /*
3585 * Look at the head of all the pending queues; if any I/O
3586 * has been outstanding for longer than the
3587 * spa_deadman_synctime, we log a zevent.
3588 */
3589 fio = avl_first(&vq->vq_active_tree);
3590 delta = gethrtime() - fio->io_timestamp;
3591 if (delta > spa_deadman_synctime(spa)) {
3592 zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
3593 "delta %lluns, last io %lluns",
3594 fio->io_timestamp, delta,
3595 vq->vq_io_complete_ts);
3596 zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
3597 spa, vd, fio, 0, 0);
3598 }
3599 }
3600 mutex_exit(&vq->vq_lock);
3601 }
3602 }
3603
3604 #if defined(_KERNEL) && defined(HAVE_SPL)
3605 EXPORT_SYMBOL(vdev_fault);
3606 EXPORT_SYMBOL(vdev_degrade);
3607 EXPORT_SYMBOL(vdev_online);
3608 EXPORT_SYMBOL(vdev_offline);
3609 EXPORT_SYMBOL(vdev_clear);
3610
3611 module_param(metaslabs_per_vdev, int, 0644);
3612 MODULE_PARM_DESC(metaslabs_per_vdev,
3613 "Divide added vdev into approximately (but no more than) this number "
3614 "of metaslabs");
3615 #endif