module/zfs/vdev.c
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#include <sys/zfs_context.h>
28#include <sys/fm/fs/zfs.h>
29#include <sys/spa.h>
30#include <sys/spa_impl.h>
31#include <sys/dmu.h>
32#include <sys/dmu_tx.h>
33#include <sys/vdev_impl.h>
34#include <sys/uberblock_impl.h>
35#include <sys/metaslab.h>
36#include <sys/metaslab_impl.h>
37#include <sys/space_map.h>
38#include <sys/zio.h>
39#include <sys/zap.h>
40#include <sys/fs/zfs.h>
41#include <sys/arc.h>
42
43/*
44 * Virtual device management.
45 */
46
47static vdev_ops_t *vdev_ops_table[] = {
48 &vdev_root_ops,
49 &vdev_raidz_ops,
50 &vdev_mirror_ops,
51 &vdev_replacing_ops,
52 &vdev_spare_ops,
53 &vdev_disk_ops,
54 &vdev_file_ops,
55 &vdev_missing_ops,
56 NULL
57};
58
59/* maximum scrub/resilver I/O queue per leaf vdev */
60int zfs_scrub_limit = 10;
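/*
 * Editorial note (illustrative, not part of the original source):
 * vdev_add_child() below raises spa_scrub_maxinflight by zfs_scrub_limit
 * for every leaf vdev added, and vdev_remove_child() lowers it again.
 * With the default of 10, a pool containing 20 leaf vdevs therefore
 * allows up to 20 * 10 = 200 in-flight scrub/resilver I/Os pool-wide.
 */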
61
62/*
63 * Given a vdev type, return the appropriate ops vector.
64 */
65static vdev_ops_t *
66vdev_getops(const char *type)
67{
68 vdev_ops_t *ops, **opspp;
69
70 for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
71 if (strcmp(ops->vdev_op_type, type) == 0)
72 break;
73
74 return (ops);
75}
76
77/*
78 * Default asize function: return the MAX of psize with the asize of
79 * all children. This is what's used by anything other than RAID-Z.
80 */
81uint64_t
82vdev_default_asize(vdev_t *vd, uint64_t psize)
83{
84 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
85 uint64_t csize;
86 uint64_t c;
87
88 for (c = 0; c < vd->vdev_children; c++) {
89 csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
90 asize = MAX(asize, csize);
91 }
92
93 return (asize);
94}
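/*
 * Worked example for vdev_default_asize() (editorial sketch, not from the
 * original source): with vd->vdev_top->vdev_ashift == 12 (4 KB sectors),
 * a psize of 1000 bytes rounds up to P2ROUNDUP(1000, 4096) == 4096.  If a
 * child's vdev_psize_to_asize() then reports 8192, the MAX() loop returns
 * 8192, i.e. the default asize is the largest of the per-child answers.
 */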
95
96/*
97 * Get the replaceable or attachable device size.
98 * If the parent is a mirror or raidz, the replaceable size is the minimum
99 * psize of all its children. For the rest, just return our own psize.
100 *
101 * e.g.
102 * psize rsize
103 * root - -
104 * mirror/raidz - -
105 * disk1 20g 20g
106 * disk2 40g 20g
107 * disk3 80g 80g
108 */
109uint64_t
110vdev_get_rsize(vdev_t *vd)
111{
112 vdev_t *pvd, *cvd;
113 uint64_t c, rsize;
114
115 pvd = vd->vdev_parent;
116
117 /*
118 * If our parent is NULL or the root, just return our own psize.
119 */
120 if (pvd == NULL || pvd->vdev_parent == NULL)
121 return (vd->vdev_psize);
122
123 rsize = 0;
124
125 for (c = 0; c < pvd->vdev_children; c++) {
126 cvd = pvd->vdev_child[c];
127 rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
128 }
129
130 return (rsize);
131}
132
133vdev_t *
134vdev_lookup_top(spa_t *spa, uint64_t vdev)
135{
136 vdev_t *rvd = spa->spa_root_vdev;
137
138 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
139
140 if (vdev < rvd->vdev_children) {
141 ASSERT(rvd->vdev_child[vdev] != NULL);
142 return (rvd->vdev_child[vdev]);
143 }
144
145 return (NULL);
146}
147
148vdev_t *
149vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
150{
151 int c;
152 vdev_t *mvd;
153
154 if (vd->vdev_guid == guid)
155 return (vd);
156
157 for (c = 0; c < vd->vdev_children; c++)
158 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
159 NULL)
160 return (mvd);
161
162 return (NULL);
163}
164
165void
166vdev_add_child(vdev_t *pvd, vdev_t *cvd)
167{
168 size_t oldsize, newsize;
169 uint64_t id = cvd->vdev_id;
170 vdev_t **newchild;
171
172 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
173 ASSERT(cvd->vdev_parent == NULL);
174
175 cvd->vdev_parent = pvd;
176
177 if (pvd == NULL)
178 return;
179
180 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
181
182 oldsize = pvd->vdev_children * sizeof (vdev_t *);
183 pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
184 newsize = pvd->vdev_children * sizeof (vdev_t *);
185
186 newchild = kmem_zalloc(newsize, KM_SLEEP);
187 if (pvd->vdev_child != NULL) {
188 bcopy(pvd->vdev_child, newchild, oldsize);
189 kmem_free(pvd->vdev_child, oldsize);
190 }
191
192 pvd->vdev_child = newchild;
193 pvd->vdev_child[id] = cvd;
194
195 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
196 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
197
198 /*
199 * Walk up all ancestors to update guid sum.
200 */
201 for (; pvd != NULL; pvd = pvd->vdev_parent)
202 pvd->vdev_guid_sum += cvd->vdev_guid_sum;
203
204 if (cvd->vdev_ops->vdev_op_leaf)
205 cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
206}
207
208void
209vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
210{
211 int c;
212 uint_t id = cvd->vdev_id;
213
214 ASSERT(cvd->vdev_parent == pvd);
215
216 if (pvd == NULL)
217 return;
218
219 ASSERT(id < pvd->vdev_children);
220 ASSERT(pvd->vdev_child[id] == cvd);
221
222 pvd->vdev_child[id] = NULL;
223 cvd->vdev_parent = NULL;
224
225 for (c = 0; c < pvd->vdev_children; c++)
226 if (pvd->vdev_child[c])
227 break;
228
229 if (c == pvd->vdev_children) {
230 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
231 pvd->vdev_child = NULL;
232 pvd->vdev_children = 0;
233 }
234
235 /*
236 * Walk up all ancestors to update guid sum.
237 */
238 for (; pvd != NULL; pvd = pvd->vdev_parent)
239 pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
240
241 if (cvd->vdev_ops->vdev_op_leaf)
242 cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
243}
244
245/*
246 * Remove any holes in the child array.
247 */
248void
249vdev_compact_children(vdev_t *pvd)
250{
251 vdev_t **newchild, *cvd;
252 int oldc = pvd->vdev_children;
253 int newc, c;
254
255 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
256
257 for (c = newc = 0; c < oldc; c++)
258 if (pvd->vdev_child[c])
259 newc++;
260
261 newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
262
263 for (c = newc = 0; c < oldc; c++) {
264 if ((cvd = pvd->vdev_child[c]) != NULL) {
265 newchild[newc] = cvd;
266 cvd->vdev_id = newc++;
267 }
268 }
269
270 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
271 pvd->vdev_child = newchild;
272 pvd->vdev_children = newc;
273}
274
275/*
276 * Allocate and minimally initialize a vdev_t.
277 */
278static vdev_t *
279vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
280{
281 vdev_t *vd;
282
283 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
284
285 if (spa->spa_root_vdev == NULL) {
286 ASSERT(ops == &vdev_root_ops);
287 spa->spa_root_vdev = vd;
288 }
289
290 if (guid == 0) {
291 if (spa->spa_root_vdev == vd) {
292 /*
293 * The root vdev's guid will also be the pool guid,
294 * which must be unique among all pools.
295 */
296 while (guid == 0 || spa_guid_exists(guid, 0))
297 guid = spa_get_random(-1ULL);
298 } else {
299 /*
300 * Any other vdev's guid must be unique within the pool.
301 */
302 while (guid == 0 ||
303 spa_guid_exists(spa_guid(spa), guid))
304 guid = spa_get_random(-1ULL);
305 }
306 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
307 }
308
309 vd->vdev_spa = spa;
310 vd->vdev_id = id;
311 vd->vdev_guid = guid;
312 vd->vdev_guid_sum = guid;
313 vd->vdev_ops = ops;
314 vd->vdev_state = VDEV_STATE_CLOSED;
315
316 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
317 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
318 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
319 for (int t = 0; t < DTL_TYPES; t++) {
320 space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
321 &vd->vdev_dtl_lock);
322 }
323 txg_list_create(&vd->vdev_ms_list,
324 offsetof(struct metaslab, ms_txg_node));
325 txg_list_create(&vd->vdev_dtl_list,
326 offsetof(struct vdev, vdev_dtl_node));
327 vd->vdev_stat.vs_timestamp = gethrtime();
328 vdev_queue_init(vd);
329 vdev_cache_init(vd);
330
331 return (vd);
332}
333
334/*
335 * Allocate a new vdev. The 'alloctype' is used to control whether we are
336 * creating a new vdev or loading an existing one - the behavior is slightly
337 * different for each case.
338 */
339int
340vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
341 int alloctype)
342{
343 vdev_ops_t *ops;
344 char *type;
345 uint64_t guid = 0, islog, nparity;
346 vdev_t *vd;
347
348 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
349
350 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
351 return (EINVAL);
352
353 if ((ops = vdev_getops(type)) == NULL)
354 return (EINVAL);
355
356 /*
357 * If this is a load, get the vdev guid from the nvlist.
358 * Otherwise, vdev_alloc_common() will generate one for us.
359 */
360 if (alloctype == VDEV_ALLOC_LOAD) {
361 uint64_t label_id;
362
363 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
364 label_id != id)
365 return (EINVAL);
366
367 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
368 return (EINVAL);
369 } else if (alloctype == VDEV_ALLOC_SPARE) {
370 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
371 return (EINVAL);
372 } else if (alloctype == VDEV_ALLOC_L2CACHE) {
373 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
374 return (EINVAL);
375 }
376
377 /*
378 * The first allocated vdev must be of type 'root'.
379 */
380 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
381 return (EINVAL);
382
383 /*
384 * Determine whether we're a log vdev.
385 */
386 islog = 0;
387 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
388 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
389 return (ENOTSUP);
390
391 /*
392 * Set the nparity property for RAID-Z vdevs.
393 */
394 nparity = -1ULL;
395 if (ops == &vdev_raidz_ops) {
396 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
397 &nparity) == 0) {
398 /*
399 * Currently, we can only support 2 parity devices.
400 */
401 if (nparity == 0 || nparity > 2)
402 return (EINVAL);
403 /*
404 * Older versions can only support 1 parity device.
405 */
406 if (nparity == 2 &&
407 spa_version(spa) < SPA_VERSION_RAID6)
408 return (ENOTSUP);
409 } else {
410 /*
411 * We require the parity to be specified for SPAs that
412 * support multiple parity levels.
413 */
414 if (spa_version(spa) >= SPA_VERSION_RAID6)
415 return (EINVAL);
416 /*
417 * Otherwise, we default to 1 parity device for RAID-Z.
418 */
419 nparity = 1;
420 }
421 } else {
422 nparity = 0;
423 }
424 ASSERT(nparity != -1ULL);
425
426 vd = vdev_alloc_common(spa, id, guid, ops);
427
428 vd->vdev_islog = islog;
429 vd->vdev_nparity = nparity;
430
431 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
432 vd->vdev_path = spa_strdup(vd->vdev_path);
433 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
434 vd->vdev_devid = spa_strdup(vd->vdev_devid);
435 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
436 &vd->vdev_physpath) == 0)
437 vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
438
439 /*
440 * Set the whole_disk property. If it's not specified, leave the value
441 * as -1.
442 */
443 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
444 &vd->vdev_wholedisk) != 0)
445 vd->vdev_wholedisk = -1ULL;
446
447 /*
448 * Look for the 'not present' flag. This will only be set if the device
449 * was not present at the time of import.
450 */
451 if (!spa->spa_import_faulted)
452 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
453 &vd->vdev_not_present);
454
455 /*
456 * Get the alignment requirement.
457 */
458 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
459
460 /*
461 * If we're a top-level vdev, try to load the allocation parameters.
462 */
463 if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
464 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
465 &vd->vdev_ms_array);
466 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
467 &vd->vdev_ms_shift);
468 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
469 &vd->vdev_asize);
470 }
471
472 /*
473 * If we're a leaf vdev, try to load the DTL object and other state.
474 */
475 if (vd->vdev_ops->vdev_op_leaf &&
476 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) {
477 if (alloctype == VDEV_ALLOC_LOAD) {
478 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
479 &vd->vdev_dtl_smo.smo_object);
480 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
481 &vd->vdev_unspare);
482 }
483 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
484 &vd->vdev_offline);
485
486 /*
487 * When importing a pool, we want to ignore the persistent fault
488 * state, as the diagnosis made on another system may not be
489 * valid in the current context.
490 */
491 if (spa->spa_load_state == SPA_LOAD_OPEN) {
492 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
493 &vd->vdev_faulted);
494 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
495 &vd->vdev_degraded);
496 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
497 &vd->vdev_removed);
498 }
499 }
500
501 /*
502 * Add ourselves to the parent's list of children.
503 */
504 vdev_add_child(parent, vd);
505
506 *vdp = vd;
507
508 return (0);
509}
510
511void
512vdev_free(vdev_t *vd)
513{
514 int c;
515 spa_t *spa = vd->vdev_spa;
516
517 /*
518 * vdev_free() implies closing the vdev first. This is simpler than
519 * trying to ensure complicated semantics for all callers.
520 */
521 vdev_close(vd);
522
523 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
524
525 /*
526 * Free all children.
527 */
528 for (c = 0; c < vd->vdev_children; c++)
529 vdev_free(vd->vdev_child[c]);
530
531 ASSERT(vd->vdev_child == NULL);
532 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
533
534 /*
535 * Discard allocation state.
536 */
537 if (vd == vd->vdev_top)
538 vdev_metaslab_fini(vd);
539
540 ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
541 ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
542 ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
543
544 /*
545 * Remove this vdev from its parent's child list.
546 */
547 vdev_remove_child(vd->vdev_parent, vd);
548
549 ASSERT(vd->vdev_parent == NULL);
550
551 /*
552 * Clean up vdev structure.
553 */
554 vdev_queue_fini(vd);
555 vdev_cache_fini(vd);
556
557 if (vd->vdev_path)
558 spa_strfree(vd->vdev_path);
559 if (vd->vdev_devid)
560 spa_strfree(vd->vdev_devid);
561 if (vd->vdev_physpath)
562 spa_strfree(vd->vdev_physpath);
563
564 if (vd->vdev_isspare)
565 spa_spare_remove(vd);
566 if (vd->vdev_isl2cache)
567 spa_l2cache_remove(vd);
568
569 txg_list_destroy(&vd->vdev_ms_list);
570 txg_list_destroy(&vd->vdev_dtl_list);
571
572 mutex_enter(&vd->vdev_dtl_lock);
573 for (int t = 0; t < DTL_TYPES; t++) {
574 space_map_unload(&vd->vdev_dtl[t]);
575 space_map_destroy(&vd->vdev_dtl[t]);
576 }
577 mutex_exit(&vd->vdev_dtl_lock);
578
579 mutex_destroy(&vd->vdev_dtl_lock);
580 mutex_destroy(&vd->vdev_stat_lock);
581 mutex_destroy(&vd->vdev_probe_lock);
582
583 if (vd == spa->spa_root_vdev)
584 spa->spa_root_vdev = NULL;
585
586 kmem_free(vd, sizeof (vdev_t));
587}
588
589/*
590 * Transfer top-level vdev state from svd to tvd.
591 */
592static void
593vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
594{
595 spa_t *spa = svd->vdev_spa;
596 metaslab_t *msp;
597 vdev_t *vd;
598 int t;
599
600 ASSERT(tvd == tvd->vdev_top);
601
602 tvd->vdev_ms_array = svd->vdev_ms_array;
603 tvd->vdev_ms_shift = svd->vdev_ms_shift;
604 tvd->vdev_ms_count = svd->vdev_ms_count;
605
606 svd->vdev_ms_array = 0;
607 svd->vdev_ms_shift = 0;
608 svd->vdev_ms_count = 0;
609
610 tvd->vdev_mg = svd->vdev_mg;
611 tvd->vdev_ms = svd->vdev_ms;
612
613 svd->vdev_mg = NULL;
614 svd->vdev_ms = NULL;
615
616 if (tvd->vdev_mg != NULL)
617 tvd->vdev_mg->mg_vd = tvd;
618
619 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
620 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
621 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
622
623 svd->vdev_stat.vs_alloc = 0;
624 svd->vdev_stat.vs_space = 0;
625 svd->vdev_stat.vs_dspace = 0;
626
627 for (t = 0; t < TXG_SIZE; t++) {
628 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
629 (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
630 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
631 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
632 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
633 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
634 }
635
636 if (list_link_active(&svd->vdev_config_dirty_node)) {
637 vdev_config_clean(svd);
638 vdev_config_dirty(tvd);
639 }
640
641 if (list_link_active(&svd->vdev_state_dirty_node)) {
642 vdev_state_clean(svd);
643 vdev_state_dirty(tvd);
644 }
645
646 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
647 svd->vdev_deflate_ratio = 0;
648
649 tvd->vdev_islog = svd->vdev_islog;
650 svd->vdev_islog = 0;
651}
652
653static void
654vdev_top_update(vdev_t *tvd, vdev_t *vd)
655{
656 int c;
657
658 if (vd == NULL)
659 return;
660
661 vd->vdev_top = tvd;
662
663 for (c = 0; c < vd->vdev_children; c++)
664 vdev_top_update(tvd, vd->vdev_child[c]);
665}
666
667/*
668 * Add a mirror/replacing vdev above an existing vdev.
669 */
670vdev_t *
671vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
672{
673 spa_t *spa = cvd->vdev_spa;
674 vdev_t *pvd = cvd->vdev_parent;
675 vdev_t *mvd;
676
677 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
678
679 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
680
681 mvd->vdev_asize = cvd->vdev_asize;
682 mvd->vdev_ashift = cvd->vdev_ashift;
683 mvd->vdev_state = cvd->vdev_state;
684
685 vdev_remove_child(pvd, cvd);
686 vdev_add_child(pvd, mvd);
687 cvd->vdev_id = mvd->vdev_children;
688 vdev_add_child(mvd, cvd);
689 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
690
691 if (mvd == mvd->vdev_top)
692 vdev_top_transfer(cvd, mvd);
693
694 return (mvd);
695}
696
697/*
698 * Remove a 1-way mirror/replacing vdev from the tree.
699 */
700void
701vdev_remove_parent(vdev_t *cvd)
702{
703 vdev_t *mvd = cvd->vdev_parent;
704 vdev_t *pvd = mvd->vdev_parent;
705
706 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
707
708 ASSERT(mvd->vdev_children == 1);
709 ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
710 mvd->vdev_ops == &vdev_replacing_ops ||
711 mvd->vdev_ops == &vdev_spare_ops);
712 cvd->vdev_ashift = mvd->vdev_ashift;
713
714 vdev_remove_child(mvd, cvd);
715 vdev_remove_child(pvd, mvd);
716
717 /*
718 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
719 * Otherwise, we could have detached an offline device, and when we
720 * go to import the pool we'll think we have two top-level vdevs,
721 * instead of a different version of the same top-level vdev.
722 */
723 if (mvd->vdev_top == mvd) {
724 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
725 cvd->vdev_guid += guid_delta;
726 cvd->vdev_guid_sum += guid_delta;
727 }
728 cvd->vdev_id = mvd->vdev_id;
729 vdev_add_child(pvd, cvd);
730 vdev_top_update(cvd->vdev_top, cvd->vdev_top);
731
732 if (cvd == cvd->vdev_top)
733 vdev_top_transfer(mvd, cvd);
734
735 ASSERT(mvd->vdev_children == 0);
736 vdev_free(mvd);
737}
738
739int
740vdev_metaslab_init(vdev_t *vd, uint64_t txg)
741{
742 spa_t *spa = vd->vdev_spa;
743 objset_t *mos = spa->spa_meta_objset;
744 metaslab_class_t *mc;
745 uint64_t m;
746 uint64_t oldc = vd->vdev_ms_count;
747 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
748 metaslab_t **mspp;
749 int error;
750
751 if (vd->vdev_ms_shift == 0) /* not being allocated from yet */
752 return (0);
753
754 ASSERT(oldc <= newc);
755
756 if (vd->vdev_islog)
757 mc = spa->spa_log_class;
758 else
759 mc = spa->spa_normal_class;
760
761 if (vd->vdev_mg == NULL)
762 vd->vdev_mg = metaslab_group_create(mc, vd);
763
764 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
765
766 if (oldc != 0) {
767 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
768 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
769 }
770
771 vd->vdev_ms = mspp;
772 vd->vdev_ms_count = newc;
773
774 for (m = oldc; m < newc; m++) {
775 space_map_obj_t smo = { 0, 0, 0 };
776 if (txg == 0) {
777 uint64_t object = 0;
778 error = dmu_read(mos, vd->vdev_ms_array,
779 m * sizeof (uint64_t), sizeof (uint64_t), &object);
780 if (error)
781 return (error);
782 if (object != 0) {
783 dmu_buf_t *db;
784 error = dmu_bonus_hold(mos, object, FTAG, &db);
785 if (error)
786 return (error);
787 ASSERT3U(db->db_size, >=, sizeof (smo));
788 bcopy(db->db_data, &smo, sizeof (smo));
789 ASSERT3U(smo.smo_object, ==, object);
790 dmu_buf_rele(db, FTAG);
791 }
792 }
793 vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
794 m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
795 }
796
797 return (0);
798}
799
800void
801vdev_metaslab_fini(vdev_t *vd)
802{
803 uint64_t m;
804 uint64_t count = vd->vdev_ms_count;
805
806 if (vd->vdev_ms != NULL) {
807 for (m = 0; m < count; m++)
808 if (vd->vdev_ms[m] != NULL)
809 metaslab_fini(vd->vdev_ms[m]);
810 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
811 vd->vdev_ms = NULL;
812 }
813}
814
815typedef struct vdev_probe_stats {
816 boolean_t vps_readable;
817 boolean_t vps_writeable;
818 int vps_flags;
819} vdev_probe_stats_t;
820
821static void
822vdev_probe_done(zio_t *zio)
823 {
824 spa_t *spa = zio->io_spa;
825 vdev_t *vd = zio->io_vd;
826 vdev_probe_stats_t *vps = zio->io_private;
827
828 ASSERT(vd->vdev_probe_zio != NULL);
829
830 if (zio->io_type == ZIO_TYPE_READ) {
831 if (zio->io_error == 0)
832 vps->vps_readable = 1;
833 if (zio->io_error == 0 && spa_writeable(spa)) {
834 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
835 zio->io_offset, zio->io_size, zio->io_data,
836 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
837 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
838 } else {
839 zio_buf_free(zio->io_data, zio->io_size);
840 }
841 } else if (zio->io_type == ZIO_TYPE_WRITE) {
842 if (zio->io_error == 0)
843 vps->vps_writeable = 1;
844 zio_buf_free(zio->io_data, zio->io_size);
845 } else if (zio->io_type == ZIO_TYPE_NULL) {
846 zio_t *pio;
847
848 vd->vdev_cant_read |= !vps->vps_readable;
849 vd->vdev_cant_write |= !vps->vps_writeable;
850
851 if (vdev_readable(vd) &&
852 (vdev_writeable(vd) || !spa_writeable(spa))) {
853 zio->io_error = 0;
854 } else {
855 ASSERT(zio->io_error != 0);
856 zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
857 spa, vd, NULL, 0, 0);
858 zio->io_error = ENXIO;
859 }
860
861 mutex_enter(&vd->vdev_probe_lock);
862 ASSERT(vd->vdev_probe_zio == zio);
863 vd->vdev_probe_zio = NULL;
864 mutex_exit(&vd->vdev_probe_lock);
865
866 while ((pio = zio_walk_parents(zio)) != NULL)
867 if (!vdev_accessible(vd, pio))
868 pio->io_error = ENXIO;
869
870 kmem_free(vps, sizeof (*vps));
871 }
872}
873
874/*
875 * Determine whether this device is accessible by reading and writing
876 * to several known locations: the pad regions of each vdev label
877 * but the first (which we leave alone in case it contains a VTOC).
878 */
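/*
 * Editorial note: vdev_probe() is used two ways in this file.  vdev_open()
 * waits for the probe synchronously, e.g. zio_wait(vdev_probe(vd, NULL)),
 * while callers that already have a zio in hand pass it in, and that zio
 * simply becomes another parent of the single shared probe zio for the
 * vdev (see the vdev_probe_zio handling below).
 */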
879zio_t *
880vdev_probe(vdev_t *vd, zio_t *zio)
881{
882 spa_t *spa = vd->vdev_spa;
883 vdev_probe_stats_t *vps = NULL;
884 zio_t *pio;
885
886 ASSERT(vd->vdev_ops->vdev_op_leaf);
34dc7c2f 887
888 /*
889 * Don't probe the probe.
890 */
891 if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
892 return (NULL);
893
894 /*
895 * To prevent 'probe storms' when a device fails, we create
896 * just one probe i/o at a time. All zios that want to probe
897 * this vdev will become parents of the probe io.
898 */
899 mutex_enter(&vd->vdev_probe_lock);
900
901 if ((pio = vd->vdev_probe_zio) == NULL) {
902 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
903
904 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
905 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
906 ZIO_FLAG_DONT_RETRY;
907
908 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
909 /*
910 * vdev_cant_read and vdev_cant_write can only
911 * transition from TRUE to FALSE when we have the
912 * SCL_ZIO lock as writer; otherwise they can only
913 * transition from FALSE to TRUE. This ensures that
914 * any zio looking at these values can assume that
915 * failures persist for the life of the I/O. That's
916 * important because when a device has intermittent
917 * connectivity problems, we want to ensure that
918 * they're ascribed to the device (ENXIO) and not
919 * the zio (EIO).
920 *
921 * Since we hold SCL_ZIO as writer here, clear both
922 * values so the probe can reevaluate from first
923 * principles.
924 */
925 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
926 vd->vdev_cant_read = B_FALSE;
927 vd->vdev_cant_write = B_FALSE;
928 }
929
930 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
931 vdev_probe_done, vps,
932 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
933
934 if (zio != NULL) {
935 vd->vdev_probe_wanted = B_TRUE;
936 spa_async_request(spa, SPA_ASYNC_PROBE);
937 }
938 }
939
940 if (zio != NULL)
941 zio_add_child(zio, pio);
942
943 mutex_exit(&vd->vdev_probe_lock);
944
945 if (vps == NULL) {
946 ASSERT(zio != NULL);
947 return (NULL);
948 }
949
950 for (int l = 1; l < VDEV_LABELS; l++) {
951 zio_nowait(zio_read_phys(pio, vd,
952 vdev_label_offset(vd->vdev_psize, l,
953 offsetof(vdev_label_t, vl_pad)),
954 VDEV_SKIP_SIZE, zio_buf_alloc(VDEV_SKIP_SIZE),
955 ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
956 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
957 }
958
959 if (zio == NULL)
960 return (pio);
961
962 zio_nowait(pio);
963 return (NULL);
964}
965
966/*
967 * Prepare a virtual device for access.
968 */
969int
970vdev_open(vdev_t *vd)
971{
972 spa_t *spa = vd->vdev_spa;
973 int error;
974 int c;
975 uint64_t osize = 0;
976 uint64_t asize, psize;
977 uint64_t ashift = 0;
978
979 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
980
981 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
982 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
983 vd->vdev_state == VDEV_STATE_OFFLINE);
984
985 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
986
987 if (!vd->vdev_removed && vd->vdev_faulted) {
988 ASSERT(vd->vdev_children == 0);
989 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
990 VDEV_AUX_ERR_EXCEEDED);
991 return (ENXIO);
992 } else if (vd->vdev_offline) {
993 ASSERT(vd->vdev_children == 0);
994 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
995 return (ENXIO);
996 }
997
998 error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
999
1000 if (zio_injection_enabled && error == 0)
1001 error = zio_handle_device_injection(vd, ENXIO);
1002
1003 if (error) {
1004 if (vd->vdev_removed &&
1005 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1006 vd->vdev_removed = B_FALSE;
1007
1008 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1009 vd->vdev_stat.vs_aux);
1010 return (error);
1011 }
1012
1013 vd->vdev_removed = B_FALSE;
1014
1015 if (vd->vdev_degraded) {
1016 ASSERT(vd->vdev_children == 0);
1017 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1018 VDEV_AUX_ERR_EXCEEDED);
1019 } else {
1020 vd->vdev_state = VDEV_STATE_HEALTHY;
1021 }
1022
1023 for (c = 0; c < vd->vdev_children; c++)
1024 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1025 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1026 VDEV_AUX_NONE);
1027 break;
1028 }
1029
1030 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1031
1032 if (vd->vdev_children == 0) {
1033 if (osize < SPA_MINDEVSIZE) {
1034 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1035 VDEV_AUX_TOO_SMALL);
1036 return (EOVERFLOW);
1037 }
1038 psize = osize;
1039 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1040 } else {
1041 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1042 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1043 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1044 VDEV_AUX_TOO_SMALL);
1045 return (EOVERFLOW);
1046 }
1047 psize = 0;
1048 asize = osize;
1049 }
1050
1051 vd->vdev_psize = psize;
1052
1053 if (vd->vdev_asize == 0) {
1054 /*
1055 * This is the first-ever open, so use the computed values.
1056 * For testing purposes, a higher ashift can be requested.
1057 */
1058 vd->vdev_asize = asize;
1059 vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
1060 } else {
1061 /*
1062 * Make sure the alignment requirement hasn't increased.
1063 */
1064 if (ashift > vd->vdev_top->vdev_ashift) {
1065 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1066 VDEV_AUX_BAD_LABEL);
1067 return (EINVAL);
1068 }
1069
1070 /*
1071 * Make sure the device hasn't shrunk.
1072 */
1073 if (asize < vd->vdev_asize) {
1074 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1075 VDEV_AUX_BAD_LABEL);
1076 return (EINVAL);
1077 }
1078
1079 /*
1080 * If all children are healthy and the asize has increased,
1081 * then we've experienced dynamic LUN growth.
1082 */
1083 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
1084 asize > vd->vdev_asize) {
1085 vd->vdev_asize = asize;
1086 }
1087 }
1088
1089 /*
1090 * Ensure we can issue some IO before declaring the
1091 * vdev open for business.
1092 */
1093 if (vd->vdev_ops->vdev_op_leaf &&
1094 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1095 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1096 VDEV_AUX_IO_FAILURE);
1097 return (error);
1098 }
1099
1100 /*
1101 * If this is a top-level vdev, compute the raidz-deflation
1102 * ratio. Note, we hard-code in 128k (1<<17) because it is the
1103 * current "typical" blocksize. Even if SPA_MAXBLOCKSIZE
1104 * changes, this algorithm must never change, or we will
1105 * inconsistently account for existing bp's.
1106 */
1107 if (vd->vdev_top == vd) {
1108 vd->vdev_deflate_ratio = (1<<17) /
1109 (vdev_psize_to_asize(vd, 1<<17) >> SPA_MINBLOCKSHIFT);
1110 }
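/*
 * Worked example for the deflate ratio above (editorial sketch, assuming
 * SPA_MINBLOCKSHIFT is 9, i.e. 512-byte minimum blocks): for a top-level
 * vdev where asize == psize, vdev_psize_to_asize(vd, 1<<17) == 1<<17, so
 * vdev_deflate_ratio = 131072 / (131072 >> 9) = 512.  A RAID-Z top-level
 * vdev returns a larger asize for the same 128K psize, giving a smaller
 * ratio, which is how parity overhead is discounted from reported space.
 */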
1111
1112 /*
1113 * If a leaf vdev has a DTL, and seems healthy, then kick off a
1114 * resilver. But don't do this if we are doing a reopen for a scrub,
1115 * since this would just restart the scrub we are already doing.
1116 */
1117 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
1118 vdev_resilver_needed(vd, NULL, NULL))
1119 spa_async_request(spa, SPA_ASYNC_RESILVER);
1120
1121 return (0);
1122}
1123
1124/*
1125 * Called once the vdevs are all opened, this routine validates the label
1126 * contents. This needs to be done before vdev_load() so that we don't
1127 * inadvertently do repair I/Os to the wrong device.
1128 *
1129 * This function will only return failure if one of the vdevs indicates that it
1130 * has since been destroyed or exported. This is only possible if
1131 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
1132 * will be updated but the function will return 0.
1133 */
1134int
1135vdev_validate(vdev_t *vd)
1136{
1137 spa_t *spa = vd->vdev_spa;
1138 int c;
1139 nvlist_t *label;
1140 uint64_t guid, top_guid;
1141 uint64_t state;
1142
1143 for (c = 0; c < vd->vdev_children; c++)
1144 if (vdev_validate(vd->vdev_child[c]) != 0)
1145 return (EBADF);
1146
1147 /*
1148 * If the device has already failed, or was marked offline, don't do
1149 * any further validation. Otherwise, label I/O will fail and we will
1150 * overwrite the previous state.
1151 */
1152 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1153
1154 if ((label = vdev_label_read_config(vd)) == NULL) {
1155 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1156 VDEV_AUX_BAD_LABEL);
1157 return (0);
1158 }
1159
1160 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
1161 &guid) != 0 || guid != spa_guid(spa)) {
1162 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1163 VDEV_AUX_CORRUPT_DATA);
1164 nvlist_free(label);
1165 return (0);
1166 }
1167
1168 /*
1169 * If this vdev just became a top-level vdev because its
1170 * sibling was detached, it will have adopted the parent's
1171 * vdev guid -- but the label may or may not be on disk yet.
1172 * Fortunately, either version of the label will have the
1173 * same top guid, so if we're a top-level vdev, we can
1174 * safely compare to that instead.
1175 */
1176 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
1177 &guid) != 0 ||
1178 nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
1179 &top_guid) != 0 ||
1180 (vd->vdev_guid != guid &&
1181 (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
1182 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1183 VDEV_AUX_CORRUPT_DATA);
1184 nvlist_free(label);
1185 return (0);
1186 }
1187
1188 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1189 &state) != 0) {
1190 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1191 VDEV_AUX_CORRUPT_DATA);
1192 nvlist_free(label);
1193 return (0);
1194 }
1195
1196 nvlist_free(label);
1197
1198 if (spa->spa_load_state == SPA_LOAD_OPEN &&
1199 state != POOL_STATE_ACTIVE)
1200 return (EBADF);
1201
1202 /*
1203 * If we were able to open and validate a vdev that was
1204 * previously marked permanently unavailable, clear that state
1205 * now.
1206 */
1207 if (vd->vdev_not_present)
1208 vd->vdev_not_present = 0;
1209 }
1210
1211 return (0);
1212}
1213
1214/*
1215 * Close a virtual device.
1216 */
1217void
1218vdev_close(vdev_t *vd)
1219{
1220 spa_t *spa = vd->vdev_spa;
1221
1222 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1223
1224 vd->vdev_ops->vdev_op_close(vd);
1225
1226 vdev_cache_purge(vd);
1227
1228 /*
1229 * We record the previous state before we close it, so that if we are
1230 * doing a reopen(), we don't generate FMA ereports if we notice that
1231 * it's still faulted.
1232 */
1233 vd->vdev_prevstate = vd->vdev_state;
1234
1235 if (vd->vdev_offline)
1236 vd->vdev_state = VDEV_STATE_OFFLINE;
1237 else
1238 vd->vdev_state = VDEV_STATE_CLOSED;
1239 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1240}
1241
1242void
1243vdev_reopen(vdev_t *vd)
1244{
1245 spa_t *spa = vd->vdev_spa;
1246
1247 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1248
1249 vdev_close(vd);
1250 (void) vdev_open(vd);
1251
1252 /*
1253 * Call vdev_validate() here to make sure we have the same device.
1254 * Otherwise, a device with an invalid label could be successfully
1255 * opened in response to vdev_reopen().
1256 */
1257 if (vd->vdev_aux) {
1258 (void) vdev_validate_aux(vd);
1259 if (vdev_readable(vd) && vdev_writeable(vd) &&
1260 !l2arc_vdev_present(vd)) {
1261 uint64_t size = vdev_get_rsize(vd);
1262 l2arc_add_vdev(spa, vd,
1263 VDEV_LABEL_START_SIZE,
1264 size - VDEV_LABEL_START_SIZE);
1265 }
1266 } else {
1267 (void) vdev_validate(vd);
1268 }
1269
1270 /*
1271 * Reassess parent vdev's health.
1272 */
1273 vdev_propagate_state(vd);
1274}
1275
1276int
1277vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1278{
1279 int error;
1280
1281 /*
1282 * Normally, partial opens (e.g. of a mirror) are allowed.
1283 * For a create, however, we want to fail the request if
1284 * there are any components we can't open.
1285 */
1286 error = vdev_open(vd);
1287
1288 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1289 vdev_close(vd);
1290 return (error ? error : ENXIO);
1291 }
1292
1293 /*
1294 * Recursively initialize all labels.
1295 */
1296 if ((error = vdev_label_init(vd, txg, isreplacing ?
1297 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
1298 vdev_close(vd);
1299 return (error);
1300 }
1301
1302 return (0);
1303}
1304
1305/*
1306 * This is the latter half of vdev_create(). It is distinct because it
1307 * involves initiating transactions in order to do metaslab creation.
1308 * For creation, we want to try to create all vdevs at once and then undo it
1309 * if anything fails; this is much harder if we have pending transactions.
1310 */
1311void
1312vdev_init(vdev_t *vd, uint64_t txg)
1313{
1314 /*
1315 * Aim for roughly 200 metaslabs per vdev.
1316 */
1317 vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
1318 vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1319
1320 /*
1321 * Initialize the vdev's metaslabs. This can't fail because
1322 * there's nothing to read when creating all new metaslabs.
1323 */
1324 VERIFY(vdev_metaslab_init(vd, txg) == 0);
1325}
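/*
 * Editorial note on the metaslab sizing in vdev_init() above (assuming
 * highbit() returns the 1-based position of the highest bit set): the
 * shift selects the power of two just above vdev_asize / 200, so a
 * 100 GB top-level vdev gets 2^30-byte (1 GB) metaslabs, i.e. on the
 * order of 100-200 metaslabs, and the shift is clamped to at least
 * SPA_MAXBLOCKSHIFT so a metaslab is never smaller than the largest
 * supported block size.
 */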
1326
1327void
1328vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1329{
1330 ASSERT(vd == vd->vdev_top);
1331 ASSERT(ISP2(flags));
1332
1333 if (flags & VDD_METASLAB)
1334 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
1335
1336 if (flags & VDD_DTL)
1337 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
1338
1339 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1340}
1341
1342/*
1343 * DTLs.
1344 *
1345 * A vdev's DTL (dirty time log) is the set of transaction groups for which
1346 * the vdev has less than perfect replication. There are three kinds of DTL:
1347 *
1348 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
1349 *
1350 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
1351 *
1352 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
1353 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
1354 * txgs that was scrubbed.
1355 *
1356 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
1357 * persistent errors or just some device being offline.
1358 * Unlike the other three, the DTL_OUTAGE map is not generally
1359 * maintained; it's only computed when needed, typically to
1360 * determine whether a device can be detached.
1361 *
1362 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
1363 * either has the data or it doesn't.
1364 *
1365 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
1366 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
1367 * if any child is less than fully replicated, then so is its parent.
1368 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
1369 * comprising only those txgs which appear in more than 'maxfaults' children;
1370 * those are the txgs we don't have enough replication to read. For example,
1371 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
1372 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
1373 * two child DTL_MISSING maps.
1374 *
1375 * It should be clear from the above that to compute the DTLs and outage maps
1376 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
1377 * Therefore, that is all we keep on disk. When loading the pool, or after
1378 * a configuration change, we generate all other DTLs from first principles.
1379 */
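/*
 * Editorial example of the minref rule used by vdev_dtl_reassess() below
 * (not part of the original comment): for a 2-way mirror, minref is the
 * number of children (2), so a txg lands in the parent's DTL_MISSING only
 * when both children are missing it; for RAID-Z2, minref is nparity + 1
 * (3), so a txg is missing only when three or more children lack it.
 */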
1380 void
1381vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1382 {
1383 space_map_t *sm = &vd->vdev_dtl[t];
1384
1385 ASSERT(t < DTL_TYPES);
1386 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1387
1388 mutex_enter(sm->sm_lock);
1389 if (!space_map_contains(sm, txg, size))
1390 space_map_add(sm, txg, size);
1391 mutex_exit(sm->sm_lock);
1392}
1393
1394boolean_t
1395vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1396 {
1397 space_map_t *sm = &vd->vdev_dtl[t];
1398 boolean_t dirty = B_FALSE;
1399
1400 ASSERT(t < DTL_TYPES);
1401 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1402
1403 mutex_enter(sm->sm_lock);
1404 if (sm->sm_space != 0)
1405 dirty = space_map_contains(sm, txg, size);
1406 mutex_exit(sm->sm_lock);
1407
1408 return (dirty);
1409}
1410
1411boolean_t
1412vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
1413{
1414 space_map_t *sm = &vd->vdev_dtl[t];
1415 boolean_t empty;
1416
1417 mutex_enter(sm->sm_lock);
1418 empty = (sm->sm_space == 0);
1419 mutex_exit(sm->sm_lock);
1420
1421 return (empty);
1422}
1423
1424/*
1425 * Reassess DTLs after a config change or scrub completion.
1426 */
1427void
1428vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1429{
1430 spa_t *spa = vd->vdev_spa;
1431 avl_tree_t reftree;
1432 int minref;
1433
1434 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1435
1436 for (int c = 0; c < vd->vdev_children; c++)
1437 vdev_dtl_reassess(vd->vdev_child[c], txg,
1438 scrub_txg, scrub_done);
1439
1440 if (vd == spa->spa_root_vdev)
1441 return;
1442
1443 if (vd->vdev_ops->vdev_op_leaf) {
1444 mutex_enter(&vd->vdev_dtl_lock);
1445 if (scrub_txg != 0 &&
1446 (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
1447 /* XXX should check scrub_done? */
1448 /*
1449 * We completed a scrub up to scrub_txg. If we
1450 * did it without rebooting, then the scrub dtl
1451 * will be valid, so excise the old region and
1452 * fold in the scrub dtl. Otherwise, leave the
1453 * dtl as-is if there was an error.
1454 *
1455 * There's a little trick here: to excise the beginning
1456 * of the DTL_MISSING map, we put it into a reference
1457 * tree and then add a segment with refcnt -1 that
1458 * covers the range [0, scrub_txg). This means
1459 * that each txg in that range has refcnt -1 or 0.
1460 * We then add DTL_SCRUB with a refcnt of 2, so that
1461 * entries in the range [0, scrub_txg) will have a
1462 * positive refcnt -- either 1 or 2. We then convert
1463 * the reference tree into the new DTL_MISSING map.
1464 */
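/*
 * Editorial worked example (not from the original source): suppose
 * DTL_MISSING covers txgs [50,150), scrub_txg is 100 and the scrub
 * repaired everything, so DTL_SCRUB is empty.  A txg of 60 scores
 * +1 (missing) -1 (below scrub_txg) = 0 and is dropped; a txg of 120
 * scores +1 and is kept, so the regenerated DTL_MISSING is [100,150).
 * Had [50,60) been unrepairable (present in DTL_SCRUB), those txgs
 * would score +1 -1 +2 = 2 and remain missing.
 */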
1465 space_map_ref_create(&reftree);
1466 space_map_ref_add_map(&reftree,
1467 &vd->vdev_dtl[DTL_MISSING], 1);
1468 space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
1469 space_map_ref_add_map(&reftree,
1470 &vd->vdev_dtl[DTL_SCRUB], 2);
1471 space_map_ref_generate_map(&reftree,
1472 &vd->vdev_dtl[DTL_MISSING], 1);
1473 space_map_ref_destroy(&reftree);
1474 }
1475 space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
1476 space_map_walk(&vd->vdev_dtl[DTL_MISSING],
1477 space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1478 if (scrub_done)
1479 space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
1480 space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
1481 if (!vdev_readable(vd))
1482 space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
1483 else
1484 space_map_walk(&vd->vdev_dtl[DTL_MISSING],
1485 space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1486 mutex_exit(&vd->vdev_dtl_lock);
1487
1488 if (txg != 0)
1489 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1490 return;
1491 }
1492
1493 mutex_enter(&vd->vdev_dtl_lock);
1494 for (int t = 0; t < DTL_TYPES; t++) {
1495 if (t == DTL_SCRUB)
1496 continue; /* leaf vdevs only */
1497 if (t == DTL_PARTIAL)
1498 minref = 1; /* i.e. non-zero */
1499 else if (vd->vdev_nparity != 0)
1500 minref = vd->vdev_nparity + 1; /* RAID-Z */
1501 else
1502 minref = vd->vdev_children; /* any kind of mirror */
1503 space_map_ref_create(&reftree);
1504 for (int c = 0; c < vd->vdev_children; c++) {
1505 vdev_t *cvd = vd->vdev_child[c];
1506 mutex_enter(&cvd->vdev_dtl_lock);
1507 space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1);
1508 mutex_exit(&cvd->vdev_dtl_lock);
1509 }
1510 space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
1511 space_map_ref_destroy(&reftree);
1512 }
1513 mutex_exit(&vd->vdev_dtl_lock);
1514}
1515
1516static int
1517vdev_dtl_load(vdev_t *vd)
1518{
1519 spa_t *spa = vd->vdev_spa;
1520 space_map_obj_t *smo = &vd->vdev_dtl_smo;
1521 objset_t *mos = spa->spa_meta_objset;
1522 dmu_buf_t *db;
1523 int error;
1524
1525 ASSERT(vd->vdev_children == 0);
1526
1527 if (smo->smo_object == 0)
1528 return (0);
1529
1530 if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
1531 return (error);
1532
1533 ASSERT3U(db->db_size, >=, sizeof (*smo));
1534 bcopy(db->db_data, smo, sizeof (*smo));
1535 dmu_buf_rele(db, FTAG);
1536
1537 mutex_enter(&vd->vdev_dtl_lock);
1538 error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
1539 NULL, SM_ALLOC, smo, mos);
1540 mutex_exit(&vd->vdev_dtl_lock);
1541
1542 return (error);
1543}
1544
1545void
1546vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1547{
1548 spa_t *spa = vd->vdev_spa;
1549 space_map_obj_t *smo = &vd->vdev_dtl_smo;
1550 space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
1551 objset_t *mos = spa->spa_meta_objset;
1552 space_map_t smsync;
1553 kmutex_t smlock;
1554 dmu_buf_t *db;
1555 dmu_tx_t *tx;
1556
1557 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1558
1559 if (vd->vdev_detached) {
1560 if (smo->smo_object != 0) {
1561 int err = dmu_object_free(mos, smo->smo_object, tx);
1562 ASSERT3U(err, ==, 0);
1563 smo->smo_object = 0;
1564 }
1565 dmu_tx_commit(tx);
1566 return;
1567 }
1568
1569 if (smo->smo_object == 0) {
1570 ASSERT(smo->smo_objsize == 0);
1571 ASSERT(smo->smo_alloc == 0);
1572 smo->smo_object = dmu_object_alloc(mos,
1573 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1574 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1575 ASSERT(smo->smo_object != 0);
1576 vdev_config_dirty(vd->vdev_top);
1577 }
1578
1579 mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
1580
1581 space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
1582 &smlock);
1583
1584 mutex_enter(&smlock);
1585
1586 mutex_enter(&vd->vdev_dtl_lock);
1587 space_map_walk(sm, space_map_add, &smsync);
1588 mutex_exit(&vd->vdev_dtl_lock);
1589
1590 space_map_truncate(smo, mos, tx);
1591 space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
1592
1593 space_map_destroy(&smsync);
1594
1595 mutex_exit(&smlock);
1596 mutex_destroy(&smlock);
1597
1598 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1599 dmu_buf_will_dirty(db, tx);
1600 ASSERT3U(db->db_size, >=, sizeof (*smo));
1601 bcopy(smo, db->db_data, sizeof (*smo));
1602 dmu_buf_rele(db, FTAG);
1603
1604 dmu_tx_commit(tx);
1605}
1606
1607/*
1608 * Determine whether the specified vdev can be offlined/detached/removed
1609 * without losing data.
1610 */
1611boolean_t
1612vdev_dtl_required(vdev_t *vd)
1613{
1614 spa_t *spa = vd->vdev_spa;
1615 vdev_t *tvd = vd->vdev_top;
1616 uint8_t cant_read = vd->vdev_cant_read;
1617 boolean_t required;
1618
1619 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1620
1621 if (vd == spa->spa_root_vdev || vd == tvd)
1622 return (B_TRUE);
1623
1624 /*
1625 * Temporarily mark the device as unreadable, and then determine
1626 * whether this results in any DTL outages in the top-level vdev.
1627 * If not, we can safely offline/detach/remove the device.
1628 */
1629 vd->vdev_cant_read = B_TRUE;
1630 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
1631 required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
1632 vd->vdev_cant_read = cant_read;
1633 vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
1634
1635 return (required);
1636}
1637
1638/*
1639 * Determine if resilver is needed, and if so the txg range.
1640 */
1641boolean_t
1642vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
1643{
1644 boolean_t needed = B_FALSE;
1645 uint64_t thismin = UINT64_MAX;
1646 uint64_t thismax = 0;
1647
1648 if (vd->vdev_children == 0) {
1649 mutex_enter(&vd->vdev_dtl_lock);
1650 if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
1651 vdev_writeable(vd)) {
1652 space_seg_t *ss;
1653
1654 ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
1655 thismin = ss->ss_start - 1;
1656 ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
1657 thismax = ss->ss_end;
1658 needed = B_TRUE;
1659 }
1660 mutex_exit(&vd->vdev_dtl_lock);
1661 } else {
1662 for (int c = 0; c < vd->vdev_children; c++) {
1663 vdev_t *cvd = vd->vdev_child[c];
1664 uint64_t cmin, cmax;
1665
1666 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
1667 thismin = MIN(thismin, cmin);
1668 thismax = MAX(thismax, cmax);
1669 needed = B_TRUE;
1670 }
1671 }
1672 }
1673
1674 if (needed && minp) {
1675 *minp = thismin;
1676 *maxp = thismax;
1677 }
1678 return (needed);
1679}
1680
1681void
1682vdev_load(vdev_t *vd)
1683{
1684 /*
1685 * Recursively load all children.
1686 */
1687 for (int c = 0; c < vd->vdev_children; c++)
1688 vdev_load(vd->vdev_child[c]);
1689
1690 /*
1691 * If this is a top-level vdev, initialize its metaslabs.
1692 */
1693 if (vd == vd->vdev_top &&
1694 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
1695 vdev_metaslab_init(vd, 0) != 0))
1696 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1697 VDEV_AUX_CORRUPT_DATA);
1698
1699 /*
1700 * If this is a leaf vdev, load its DTL.
1701 */
1702 if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
1703 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1704 VDEV_AUX_CORRUPT_DATA);
1705}
1706
1707/*
1708 * The special vdev case is used for hot spares and l2cache devices. Its
1709 * sole purpose is to set the vdev state for the associated vdev. To do this,
1710 * we make sure that we can open the underlying device, then try to read the
1711 * label, and make sure that the label is sane and that it hasn't been
1712 * repurposed to another pool.
1713 */
1714int
1715vdev_validate_aux(vdev_t *vd)
1716{
1717 nvlist_t *label;
1718 uint64_t guid, version;
1719 uint64_t state;
1720
1721 if (!vdev_readable(vd))
1722 return (0);
1723
1724 if ((label = vdev_label_read_config(vd)) == NULL) {
1725 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1726 VDEV_AUX_CORRUPT_DATA);
1727 return (-1);
1728 }
1729
1730 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
1731 version > SPA_VERSION ||
1732 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
1733 guid != vd->vdev_guid ||
1734 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
1735 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1736 VDEV_AUX_CORRUPT_DATA);
1737 nvlist_free(label);
1738 return (-1);
1739 }
1740
1741 /*
1742 * We don't actually check the pool state here. If it's in fact in
1743 * use by another pool, we update this fact on the fly when requested.
1744 */
1745 nvlist_free(label);
1746 return (0);
1747}
1748
1749void
1750vdev_sync_done(vdev_t *vd, uint64_t txg)
1751{
1752 metaslab_t *msp;
1753
1754 while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
1755 metaslab_sync_done(msp, txg);
1756}
1757
1758void
1759vdev_sync(vdev_t *vd, uint64_t txg)
1760{
1761 spa_t *spa = vd->vdev_spa;
1762 vdev_t *lvd;
1763 metaslab_t *msp;
1764 dmu_tx_t *tx;
1765
1766 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
1767 ASSERT(vd == vd->vdev_top);
1768 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1769 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
1770 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
1771 ASSERT(vd->vdev_ms_array != 0);
1772 vdev_config_dirty(vd);
1773 dmu_tx_commit(tx);
1774 }
1775
1776 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
1777 metaslab_sync(msp, txg);
1778 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
1779 }
1780
1781 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
1782 vdev_dtl_sync(lvd, txg);
1783
1784 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
1785}
1786
1787uint64_t
1788vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
1789{
1790 return (vd->vdev_ops->vdev_op_asize(vd, psize));
1791}
1792
1793/*
1794 * Mark the given vdev faulted. A faulted vdev behaves as if the device could
1795 * not be opened, and no I/O is attempted.
1796 */
1797int
1798vdev_fault(spa_t *spa, uint64_t guid)
1799{
1800 vdev_t *vd;
1801
1802 spa_vdev_state_enter(spa);
1803
1804 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
1805 return (spa_vdev_state_exit(spa, NULL, ENODEV));
1806
1807 if (!vd->vdev_ops->vdev_op_leaf)
1808 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
1809
1810 /*
1811 * Faulted state takes precedence over degraded.
1812 */
1813 vd->vdev_faulted = 1ULL;
1814 vd->vdev_degraded = 0ULL;
1815 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
1816
1817 /*
1818 * If marking the vdev as faulted causes the top-level vdev to become
1819 * unavailable, then back off and simply mark the vdev as degraded
1820 * instead.
1821 */
1822 if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
1823 vd->vdev_degraded = 1ULL;
1824 vd->vdev_faulted = 0ULL;
1825
1826 /*
1827 * If we reopen the device and it's not dead, only then do we
1828 * mark it degraded.
1829 */
1830 vdev_reopen(vd);
1831
1832 if (vdev_readable(vd)) {
1833 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
1834 VDEV_AUX_ERR_EXCEEDED);
1835 }
1836 }
1837
1838 return (spa_vdev_state_exit(spa, vd, 0));
1839}
1840
1841/*
1842 * Mark the given vdev degraded. A degraded vdev is purely an indication to the
1843 * user that something is wrong. The vdev continues to operate as normal as far
1844 * as I/O is concerned.
1845 */
1846int
1847vdev_degrade(spa_t *spa, uint64_t guid)
1848{
1849 vdev_t *vd;
1850
1851 spa_vdev_state_enter(spa);
1852
1853 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
1854 return (spa_vdev_state_exit(spa, NULL, ENODEV));
1855
1856 if (!vd->vdev_ops->vdev_op_leaf)
1857 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
1858
1859 /*
1860 * If the vdev is already faulted, then don't do anything.
1861 */
1862 if (vd->vdev_faulted || vd->vdev_degraded)
1863 return (spa_vdev_state_exit(spa, NULL, 0));
1864
1865 vd->vdev_degraded = 1ULL;
1866 if (!vdev_is_dead(vd))
1867 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
1868 VDEV_AUX_ERR_EXCEEDED);
1869
1870 return (spa_vdev_state_exit(spa, vd, 0));
1871}
1872
1873/*
1874 * Online the given vdev. If 'unspare' is set, it implies two things. First,
1875 * any attached spare device should be detached when the device finishes
1876 * resilvering. Second, the online should be treated like a 'test' online case,
1877 * so no FMA events are generated if the device fails to open.
1878 */
1879int
1880vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
1881 {
1882 vdev_t *vd;
1883
1884 spa_vdev_state_enter(spa);
1885
1886 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
1887 return (spa_vdev_state_exit(spa, NULL, ENODEV));
1888
1889 if (!vd->vdev_ops->vdev_op_leaf)
1890 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
1891
1892 vd->vdev_offline = B_FALSE;
1893 vd->vdev_tmpoffline = B_FALSE;
1894 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
1895 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
1896 vdev_reopen(vd->vdev_top);
1897 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
1898
1899 if (newstate)
1900 *newstate = vd->vdev_state;
1901 if ((flags & ZFS_ONLINE_UNSPARE) &&
1902 !vdev_is_dead(vd) && vd->vdev_parent &&
1903 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
1904 vd->vdev_parent->vdev_child[0] == vd)
1905 vd->vdev_unspare = B_TRUE;
1906
1907 return (spa_vdev_state_exit(spa, vd, 0));
1908}
1909
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/*
	 * If the device isn't already offline, try to offline it.
	 */
	if (!vd->vdev_offline) {
		/*
		 * If this device has the only valid copy of some data,
		 * don't allow it to be offlined.
		 */
		if (vd->vdev_aux == NULL && vdev_dtl_required(vd))
			return (spa_vdev_state_exit(spa, NULL, EBUSY));

		/*
		 * Offline this device and reopen its top-level vdev.
		 * If this action results in the top-level vdev becoming
		 * unusable, undo it and fail the request.
		 */
		vd->vdev_offline = B_TRUE;
		vdev_reopen(vd->vdev_top);
		if (vd->vdev_aux == NULL && vdev_is_dead(vd->vdev_top)) {
			vd->vdev_offline = B_FALSE;
			vdev_reopen(vd->vdev_top);
			return (spa_vdev_state_exit(spa, NULL, EBUSY));
		}
	}

	vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);

	return (spa_vdev_state_exit(spa, vd, 0));
}

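/*
 * Illustrative sketch (not part of the original source): temporarily taking a
 * leaf vdev offline for maintenance and bringing it back.  The helper name
 * zfs_example_cycle_device() and the EIO fallback are assumptions; EBUSY from
 * vdev_offline() means the device holds the only valid copy of some data and
 * cannot be offlined.
 */
#if 0
static int
zfs_example_cycle_device(spa_t *spa, uint64_t guid)
{
	vdev_state_t newstate;
	int error;

	/* ZFS_OFFLINE_TEMPORARY: do not persist the offline state on disk */
	error = vdev_offline(spa, guid, ZFS_OFFLINE_TEMPORARY);
	if (error != 0)
		return (error);		/* e.g. EBUSY, ENODEV, ENOTSUP */

	/* ... perform maintenance on the underlying device ... */

	error = vdev_online(spa, guid, 0, &newstate);
	if (error == 0 && newstate != VDEV_STATE_HEALTHY)
		error = EIO;		/* came back, but not healthy */

	return (error);
}
#endif
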
/*
 * Clear the error counts associated with this vdev. Unlike vdev_online() and
 * vdev_offline(), we assume the spa config is locked. We also clear all
 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
 */
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (vd == NULL)
		vd = rvd;

	vd->vdev_stat.vs_read_errors = 0;
	vd->vdev_stat.vs_write_errors = 0;
	vd->vdev_stat.vs_checksum_errors = 0;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_clear(spa, vd->vdev_child[c]);

	/*
	 * If we're in the FAULTED state or have experienced failed I/O, then
	 * clear the persistent state and attempt to reopen the device. We
	 * also mark the vdev config dirty, so that the new faulted state is
	 * written out to disk.
	 */
	if (vd->vdev_faulted || vd->vdev_degraded ||
	    !vdev_readable(vd) || !vdev_writeable(vd)) {

		vd->vdev_faulted = vd->vdev_degraded = 0;
		vd->vdev_cant_read = B_FALSE;
		vd->vdev_cant_write = B_FALSE;

		vdev_reopen(vd);

		if (vd != rvd)
			vdev_state_dirty(vd->vdev_top);

		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
			spa_async_request(spa, SPA_ASYNC_RESILVER);

		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
	}
}

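/*
 * Illustrative sketch (not part of the original source): a 'zpool clear'-like
 * path.  The wrapper name zfs_example_clear_pool() is an assumption; the key
 * point is that the caller, not vdev_clear(), must hold all SCL_STATE locks
 * as writer, which spa_vdev_state_enter()/spa_vdev_state_exit() arrange.
 */
#if 0
static void
zfs_example_clear_pool(spa_t *spa)
{
	spa_vdev_state_enter(spa);
	vdev_clear(spa, NULL);		/* NULL: clear every vdev in the pool */
	(void) spa_vdev_state_exit(spa, NULL, 0);
}
#endif
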
boolean_t
vdev_is_dead(vdev_t *vd)
{
	return (vd->vdev_state < VDEV_STATE_DEGRADED);
}

boolean_t
vdev_readable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}

boolean_t
vdev_writeable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
}

boolean_t
vdev_allocatable(vdev_t *vd)
{
	uint64_t state = vd->vdev_state;

	/*
	 * We currently allow allocations from vdevs which may be in the
	 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
	 * fails to reopen then we'll catch it later when we're holding
	 * the proper locks. Note that we have to get the vdev state
	 * in a local variable because although it changes atomically,
	 * we're asking two separate questions about it.
	 */
	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
	    !vd->vdev_cant_write);
}

boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
	ASSERT(zio->io_vd == vd);

	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
		return (B_FALSE);

	if (zio->io_type == ZIO_TYPE_READ)
		return (!vd->vdev_cant_read);

	if (zio->io_type == ZIO_TYPE_WRITE)
		return (!vd->vdev_cant_write);

	return (B_TRUE);
}

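/*
 * Illustrative sketch (not part of the original source): why vdev_allocatable()
 * snapshots vd->vdev_state into a local.  The "racy" variant below is an
 * assumption written only to show the hazard: reading the field twice asks two
 * questions about a value that may change between the reads, so a vdev could
 * be judged inconsistently against the two tests.
 */
#if 0
static boolean_t
vdev_allocatable_racy(vdev_t *vd)
{
	/* BAD: vd->vdev_state may change between these two evaluations. */
	return (!(vd->vdev_state < VDEV_STATE_DEGRADED &&
	    vd->vdev_state != VDEV_STATE_CLOSED) && !vd->vdev_cant_write);
}
#endif
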
/*
 * Get statistics for the given vdev.
 */
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;

	mutex_enter(&vd->vdev_stat_lock);
	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
	vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors;
	vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
	vs->vs_state = vd->vdev_state;
	vs->vs_rsize = vdev_get_rsize(vd);
	mutex_exit(&vd->vdev_stat_lock);

	/*
	 * If we're getting stats on the root vdev, aggregate the I/O counts
	 * over all top-level vdevs (i.e. the direct children of the root).
	 */
	if (vd == rvd) {
		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *cvd = rvd->vdev_child[c];
			vdev_stat_t *cvs = &cvd->vdev_stat;

			mutex_enter(&vd->vdev_stat_lock);
			for (int t = 0; t < ZIO_TYPES; t++) {
				vs->vs_ops[t] += cvs->vs_ops[t];
				vs->vs_bytes[t] += cvs->vs_bytes[t];
			}
			vs->vs_scrub_examined += cvs->vs_scrub_examined;
			mutex_exit(&vd->vdev_stat_lock);
		}
	}
}

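/*
 * Illustrative sketch (not part of the original source): consuming
 * vdev_get_stats().  The helper name zfs_example_pool_read_bytes() is an
 * assumption; because the root vdev aggregates its children, a single call on
 * spa->spa_root_vdev yields pool-wide totals.
 */
#if 0
static uint64_t
zfs_example_pool_read_bytes(spa_t *spa)
{
	vdev_stat_t vs;

	vdev_get_stats(spa->spa_root_vdev, &vs);
	return (vs.vs_bytes[ZIO_TYPE_READ]);
}
#endif
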
void
vdev_clear_stats(vdev_t *vd)
{
	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_space = 0;
	vd->vdev_stat.vs_dspace = 0;
	vd->vdev_stat.vs_alloc = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
	spa_t *spa = zio->io_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
	vdev_t *pvd;
	uint64_t txg = zio->io_txg;
	vdev_stat_t *vs = &vd->vdev_stat;
	zio_type_t type = zio->io_type;
	int flags = zio->io_flags;

	/*
	 * If this i/o is a gang leader, it didn't do any actual work.
	 */
	if (zio->io_gang_tree)
		return;

	if (zio->io_error == 0) {
		/*
		 * If this is a root i/o, don't count it -- we've already
		 * counted the top-level vdevs, and vdev_get_stats() will
		 * aggregate them when asked. This reduces contention on
		 * the root vdev_stat_lock and implicitly handles blocks
		 * that compress away to holes, for which there is no i/o.
		 * (Holes never create vdev children, so all the counters
		 * remain zero, which is what we want.)
		 *
		 * Note: this only applies to successful i/o (io_error == 0)
		 * because unlike i/o counts, errors are not additive.
		 * When reading a ditto block, for example, failure of
		 * one top-level vdev does not imply a root-level error.
		 */
		if (vd == rvd)
			return;

		ASSERT(vd == zio->io_vd);

		if (flags & ZIO_FLAG_IO_BYPASS)
			return;

		mutex_enter(&vd->vdev_stat_lock);

		if (flags & ZIO_FLAG_IO_REPAIR) {
			if (flags & ZIO_FLAG_SCRUB_THREAD)
				vs->vs_scrub_repaired += psize;
			if (flags & ZIO_FLAG_SELF_HEAL)
				vs->vs_self_healed += psize;
		}

		vs->vs_ops[type]++;
		vs->vs_bytes[type] += psize;

		mutex_exit(&vd->vdev_stat_lock);
		return;
	}

	if (flags & ZIO_FLAG_SPECULATIVE)
		return;

	mutex_enter(&vd->vdev_stat_lock);
	if (type == ZIO_TYPE_READ) {
		if (zio->io_error == ECKSUM)
			vs->vs_checksum_errors++;
		else
			vs->vs_read_errors++;
	}
	if (type == ZIO_TYPE_WRITE)
		vs->vs_write_errors++;
	mutex_exit(&vd->vdev_stat_lock);

	if (type == ZIO_TYPE_WRITE && txg != 0 &&
	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
	    (flags & ZIO_FLAG_SCRUB_THREAD))) {
		/*
		 * This is either a normal write (not a repair), or it's a
		 * repair induced by the scrub thread. In the normal case,
		 * we commit the DTL change in the same txg as the block
		 * was born. In the scrub-induced repair case, we know that
		 * scrubs run in first-pass syncing context, so we commit
		 * the DTL change in spa->spa_syncing_txg.
		 *
		 * We currently do not make DTL entries for failed spontaneous
		 * self-healing writes triggered by normal (non-scrubbing)
		 * reads, because we have no transactional context in which to
		 * do so -- and it's not clear that it'd be desirable anyway.
		 */
		if (vd->vdev_ops->vdev_op_leaf) {
			uint64_t commit_txg = txg;
			if (flags & ZIO_FLAG_SCRUB_THREAD) {
				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
				ASSERT(spa_sync_pass(spa) == 1);
				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
				commit_txg = spa->spa_syncing_txg;
			}
			ASSERT(commit_txg >= spa->spa_syncing_txg);
			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
				return;
			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
		}
		if (vd != rvd)
			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
	}
}

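/*
 * Illustrative sketch (not part of the original source): the commit-txg choice
 * made above for failed writes, reduced to a single expression.  Names are as
 * used in vdev_stat_update(); this is a restatement, not additional logic.
 */
#if 0
	/*
	 * Normal write:        DTL change commits in the block's birth txg.
	 * Scrub-thread repair: DTL change commits in spa->spa_syncing_txg,
	 *                      since scrubs run in first-pass syncing context.
	 */
	uint64_t commit_txg = (flags & ZIO_FLAG_SCRUB_THREAD) ?
	    spa->spa_syncing_txg : txg;
#endif
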
void
vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
{
	int c;
	vdev_stat_t *vs = &vd->vdev_stat;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_scrub_stat_update(vd->vdev_child[c], type, complete);

	mutex_enter(&vd->vdev_stat_lock);

	if (type == POOL_SCRUB_NONE) {
		/*
		 * Update completion and end time. Leave everything else alone
		 * so we can report what happened during the previous scrub.
		 */
		vs->vs_scrub_complete = complete;
		vs->vs_scrub_end = gethrestime_sec();
	} else {
		vs->vs_scrub_type = type;
		vs->vs_scrub_complete = 0;
		vs->vs_scrub_examined = 0;
		vs->vs_scrub_repaired = 0;
		vs->vs_scrub_start = gethrestime_sec();
		vs->vs_scrub_end = 0;
	}

	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Update the in-core space usage stats for this vdev and the root vdev.
 */
void
vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta,
    boolean_t update_root)
{
	int64_t dspace_delta = space_delta;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(vd == vd->vdev_top);

	/*
	 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
	 * factor. We must calculate this here and not at the root vdev
	 * because the root vdev's psize-to-asize is simply the max of its
	 * children's, thus not accurate enough for us.
	 */
	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
	    vd->vdev_deflate_ratio;

	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_space += space_delta;
	vd->vdev_stat.vs_alloc += alloc_delta;
	vd->vdev_stat.vs_dspace += dspace_delta;
	mutex_exit(&vd->vdev_stat_lock);

	if (update_root) {
		ASSERT(rvd == vd->vdev_parent);
		ASSERT(vd->vdev_ms_count != 0);

		/*
		 * Don't count non-normal (e.g. intent log) space as part of
		 * the pool's capacity.
		 */
		if (vd->vdev_mg->mg_class != spa->spa_normal_class)
			return;

		mutex_enter(&rvd->vdev_stat_lock);
		rvd->vdev_stat.vs_space += space_delta;
		rvd->vdev_stat.vs_alloc += alloc_delta;
		rvd->vdev_stat.vs_dspace += dspace_delta;
		mutex_exit(&rvd->vdev_stat_lock);
	}
}

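/*
 * Worked example (an assumption for illustration; the units of
 * vdev_deflate_ratio are not defined in this file): suppose a top-level RAID-Z
 * vdev has a 4/3 psize-to-asize expansion and its precomputed deflate ratio is
 * 384 deflated bytes per SPA_MINBLOCKSIZE (512-byte) sector of raw space.
 */
#if 0
	int64_t space_delta = 131072;		/* 128 KiB of raw space */
	int64_t dspace_delta = (space_delta >> SPA_MINBLOCKSHIFT) * 384;
	/* (131072 >> 9) * 384 = 256 * 384 = 98304, i.e. 96 KiB deflated */
#endif
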
/*
 * Mark a top-level vdev's config as dirty, placing it on the dirty list
 * so that it will be written out next time the vdev configuration is synced.
 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
 */
void
vdev_config_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	/*
	 * If this is an aux vdev (as with l2cache devices), then we update the
	 * vdev config manually and set the sync flag.
	 */
	if (vd->vdev_aux != NULL) {
		spa_aux_vdev_t *sav = vd->vdev_aux;
		nvlist_t **aux;
		uint_t naux;

		for (c = 0; c < sav->sav_count; c++) {
			if (sav->sav_vdevs[c] == vd)
				break;
		}

		if (c == sav->sav_count) {
			/*
			 * We're being removed. There's nothing more to do.
			 */
			ASSERT(sav->sav_sync == B_TRUE);
			return;
		}

		sav->sav_sync = B_TRUE;

		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) == 0);

		ASSERT(c < naux);

		/*
		 * Setting the nvlist in the middle of the array is a little
		 * sketchy, but it will work.
		 */
		nvlist_free(aux[c]);
		aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);

		return;
	}

	/*
	 * The dirty list is protected by the SCL_CONFIG lock. The caller
	 * must either hold SCL_CONFIG as writer, or must be the sync thread
	 * (which holds SCL_CONFIG as reader). There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_CONFIG, RW_READER)));

	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_config_dirty(rvd->vdev_child[c]);
	} else {
		ASSERT(vd == vd->vdev_top);

		if (!list_link_active(&vd->vdev_config_dirty_node))
			list_insert_head(&spa->spa_config_dirty_list, vd);
	}
}

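/*
 * Illustrative sketch (not part of the original source): the dirty/clean
 * pairing.  An administrative thread holding SCL_CONFIG dirties a top-level
 * vdev; the (single) sync thread later walks spa_config_dirty_list, rewrites
 * the labels, and calls vdev_config_clean().  Lock acquisition and the label
 * write itself are elided as assumptions.
 */
#if 0
	/* administrative thread, SCL_CONFIG held as writer */
	vdev_config_dirty(tvd);		/* tvd == tvd->vdev_top */

	/* sync thread, SCL_CONFIG held as reader */
	while ((tvd = list_head(&spa->spa_config_dirty_list)) != NULL) {
		/* ... write updated labels for tvd ... */
		vdev_config_clean(tvd);
	}
#endif
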
void
vdev_config_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_CONFIG, RW_READER)));

	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
	list_remove(&spa->spa_config_dirty_list, vd);
}

/*
 * Mark a top-level vdev's state as dirty, so that the next pass of
 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
 * the state changes from larger config changes because they require
 * much less locking, and are often needed for administrative actions.
 */
void
vdev_state_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(vd == vd->vdev_top);

	/*
	 * The state list is protected by the SCL_STATE lock. The caller
	 * must either hold SCL_STATE as writer, or must be the sync thread
	 * (which holds SCL_STATE as reader). There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_STATE, RW_READER)));

	if (!list_link_active(&vd->vdev_state_dirty_node))
		list_insert_head(&spa->spa_state_dirty_list, vd);
}

void
vdev_state_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_STATE, RW_READER)));

	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
	list_remove(&spa->spa_state_dirty_list, vd);
}

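/*
 * Illustrative sketch (not part of the original source): how spa_sync() is
 * described above as promoting state-dirty vdevs into config-dirty ones.  The
 * loop below is an assumption sketched from that comment, not a copy of
 * spa_sync(); locking details are elided.
 */
#if 0
	vdev_t *tvd;

	while ((tvd = list_head(&spa->spa_state_dirty_list)) != NULL) {
		vdev_state_clean(tvd);
		vdev_config_dirty(tvd);		/* full config write next pass */
	}
#endif
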
/*
 * Propagate vdev state up from children to parent.
 */
void
vdev_propagate_state(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int degraded = 0, faulted = 0;
	int corrupted = 0;
	int c;
	vdev_t *child;

	if (vd->vdev_children > 0) {
		for (c = 0; c < vd->vdev_children; c++) {
			child = vd->vdev_child[c];

			if (!vdev_readable(child) ||
			    (!vdev_writeable(child) && spa_writeable(spa))) {
				/*
				 * Root special: if there is a top-level log
				 * device, treat the root vdev as if it were
				 * degraded.
				 */
				if (child->vdev_islog && vd == rvd)
					degraded++;
				else
					faulted++;
			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
				degraded++;
			}

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

		/*
		 * Root special: if there is a top-level vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == rvd &&
		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}

	if (vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}

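/*
 * Worked example (not part of the original source, and an assumption about the
 * mirror ops): for a 2-way mirror with one healthy child and one unreadable
 * child, the loop above computes faulted = 1 and degraded = 0, and the
 * mirror's vdev_op_state_change() callback is expected to leave the mirror
 * readable but in the DEGRADED state; the exact policy lives in the per-vdev
 * ops, not in this file.
 */
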
/*
 * Set a vdev's state. If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport is
 * generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
	uint64_t save_state;
	spa_t *spa = vd->vdev_spa;

	if (state == vd->vdev_state) {
		vd->vdev_stat.vs_aux = aux;
		return;
	}

	save_state = vd->vdev_state;

	vd->vdev_state = state;
	vd->vdev_stat.vs_aux = aux;

	/*
	 * If we are setting the vdev state to anything but an open state, then
	 * always close the underlying device. Otherwise, we keep accessible
	 * but invalid devices open forever. We don't call vdev_close() itself,
	 * because that implies some extra checks (offline, etc) that we don't
	 * want here. This is limited to leaf devices, because otherwise
	 * closing the device will affect other children.
	 */
	if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_close(vd);

	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then this
		 * device was previously marked removed and someone attempted to
		 * reopen it. If this failed due to a nonexistent device, then
		 * keep the device in the REMOVED state. We also keep it there
		 * if this is one of our special test online cases, which is
		 * only attempting to online the device and shouldn't generate
		 * an FMA fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		/*
		 * Indicate to the ZFS DE that this device has been removed, and
		 * any recent errors should be ignored.
		 */
		zfs_post_remove(spa, vd);
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import, we mark it as
		 * "not available", which signifies that it was never there to
		 * begin with. Failure to open such a device is not considered
		 * an error.
		 */
		if (spa->spa_load_state == SPA_LOAD_IMPORT &&
		    !spa->spa_import_faulted &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport. If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen(). In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event. If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			case VDEV_AUX_IO_FAILURE:
				class = FM_EREPORT_ZFS_IO_FAILURE;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen)
		vdev_propagate_state(vd);
}

/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool. Currently, we do not support RAID-Z or partial configuration.
 * In addition, only a single top-level vdev is allowed and none of the leaves
 * can be wholedisks.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	int c;

	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
			return (B_FALSE);
		}
	} else if (vd->vdev_wholedisk == 1) {
		return (B_FALSE);
	}

	for (c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}
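
/*
 * Illustrative sketch (not part of the original source): a caller validating a
 * prospective root pool.  The surrounding context and the ENOTSUP choice are
 * assumptions; the real callers live outside this file.
 */
#if 0
	if (!vdev_is_bootable(spa->spa_root_vdev))
		return (ENOTSUP);	/* RAID-Z, >1 top-level, or wholedisk leaf */
#endif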