/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>

/*
 * When a vdev is added, it will be divided into approximately (but no
 * more than) this number of metaslabs.
 */
int metaslabs_per_vdev = 200;

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;
	int c;

	for (c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

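/*
 * Worked example (added commentary, not part of the original file): with
 * a top-level vdev_ashift of 12 (4 KiB sectors), a psize of 6000 bytes
 * rounds up to P2ROUNDUP(6000, 4096) == 8192.  The loop then takes the
 * MAX over all children, so a child with a stricter alignment can only
 * raise the returned asize, never lower it.
 */
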
/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
		    pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

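/*
 * Illustrative numbers (added commentary, not part of the original file):
 * for a raidz parent with vdev_children == 6 and vdev_min_asize == 6000 GB,
 * each child must provide at least (6000 GB + 5) / 6 == 1000 GB.  The
 * "+ children - 1" term simply rounds the integer division up, so the
 * children can never collectively fall short of the parent's minimum.
 */
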
void
vdev_set_min_asize(vdev_t *vd)
{
	int c;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;
	int c;

	if (vd->vdev_guid == guid)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;
	int c;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
	return (vdev_count_leaves_impl(spa->spa_root_vdev));
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_alloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
}

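/*
 * Invariant sketch (added commentary, not part of the original file):
 * after the guid-sum walk above, every ancestor P satisfies
 *
 *	P->vdev_guid_sum == P->vdev_guid + sum(C->vdev_guid_sum)
 *
 * over P's children C.  vdev_free() later asserts the childless form of
 * this, vdev_guid_sum == vdev_guid, once all children have been removed.
 */
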
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;
	int c;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;
	int t;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);

	/*
	 * Initialize rate limit structs for events.  We rate limit ZIO delay
	 * and checksum events so that we don't overwhelm ZED with thousands
	 * of events when a disk is acting up.
	 */
	zfs_ratelimit_init(&vd->vdev_delay_rl, DELAYS_PER_SECOND, 1);
	zfs_ratelimit_init(&vd->vdev_checksum_rl, CHECKSUMS_PER_SECOND, 1);

	list_link_init(&vd->vdev_config_dirty_node);
	list_link_init(&vd->vdev_state_dirty_node);
	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);

	for (t = 0; t < DTL_TYPES; t++) {
		vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (SET_ERROR(EINVAL));

	if ((ops = vdev_getops(type)) == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (SET_ERROR(ENOTSUP));

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (SET_ERROR(ENOTSUP));

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (SET_ERROR(EINVAL));
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (SET_ERROR(ENOTSUP));
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (SET_ERROR(ENOTSUP));
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (SET_ERROR(EINVAL));
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &vd->vdev_enc_sysfs_path) == 0)
		vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
		    &vd->vdev_top_zap);
	} else {
		ASSERT0(vd->vdev_top_zap);
	}

	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		vd->vdev_mg = metaslab_group_create(islog ?
		    spa_log_class(spa) : spa_normal_class(spa), vd);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv,
		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
	} else {
		ASSERT0(vd->vdev_leaf_zap);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
		    &vd->vdev_resilver_txg);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.  Local vdevs will
		 * remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}
630void
631vdev_free(vdev_t *vd)
632{
d6320ddb 633 int c, t;
34dc7c2f
BB
634 spa_t *spa = vd->vdev_spa;
635
636 /*
637 * vdev_free() implies closing the vdev first. This is simpler than
638 * trying to ensure complicated semantics for all callers.
639 */
640 vdev_close(vd);
641
b128c09f 642 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
428870ff 643 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
34dc7c2f
BB
644
645 /*
646 * Free all children.
647 */
d6320ddb 648 for (c = 0; c < vd->vdev_children; c++)
34dc7c2f
BB
649 vdev_free(vd->vdev_child[c]);
650
651 ASSERT(vd->vdev_child == NULL);
652 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
653
654 /*
655 * Discard allocation state.
656 */
428870ff 657 if (vd->vdev_mg != NULL) {
34dc7c2f 658 vdev_metaslab_fini(vd);
428870ff
BB
659 metaslab_group_destroy(vd->vdev_mg);
660 }
34dc7c2f 661
c99c9001
MS
662 ASSERT0(vd->vdev_stat.vs_space);
663 ASSERT0(vd->vdev_stat.vs_dspace);
664 ASSERT0(vd->vdev_stat.vs_alloc);
34dc7c2f
BB
665
666 /*
667 * Remove this vdev from its parent's child list.
668 */
669 vdev_remove_child(vd->vdev_parent, vd);
670
671 ASSERT(vd->vdev_parent == NULL);
672
673 /*
674 * Clean up vdev structure.
675 */
676 vdev_queue_fini(vd);
677 vdev_cache_fini(vd);
678
679 if (vd->vdev_path)
680 spa_strfree(vd->vdev_path);
681 if (vd->vdev_devid)
682 spa_strfree(vd->vdev_devid);
683 if (vd->vdev_physpath)
684 spa_strfree(vd->vdev_physpath);
1bbd8770
TH
685
686 if (vd->vdev_enc_sysfs_path)
687 spa_strfree(vd->vdev_enc_sysfs_path);
688
9babb374
BB
689 if (vd->vdev_fru)
690 spa_strfree(vd->vdev_fru);
34dc7c2f
BB
691
692 if (vd->vdev_isspare)
693 spa_spare_remove(vd);
694 if (vd->vdev_isl2cache)
695 spa_l2cache_remove(vd);
696
697 txg_list_destroy(&vd->vdev_ms_list);
698 txg_list_destroy(&vd->vdev_dtl_list);
fb5f0bc8 699
34dc7c2f 700 mutex_enter(&vd->vdev_dtl_lock);
93cf2076 701 space_map_close(vd->vdev_dtl_sm);
d6320ddb 702 for (t = 0; t < DTL_TYPES; t++) {
93cf2076
GW
703 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
704 range_tree_destroy(vd->vdev_dtl[t]);
fb5f0bc8 705 }
34dc7c2f 706 mutex_exit(&vd->vdev_dtl_lock);
fb5f0bc8 707
3dfb57a3 708 mutex_destroy(&vd->vdev_queue_lock);
34dc7c2f
BB
709 mutex_destroy(&vd->vdev_dtl_lock);
710 mutex_destroy(&vd->vdev_stat_lock);
b128c09f 711 mutex_destroy(&vd->vdev_probe_lock);
34dc7c2f
BB
712
713 if (vd == spa->spa_root_vdev)
714 spa->spa_root_vdev = NULL;
715
716 kmem_free(vd, sizeof (vdev_t));
717}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;
	tvd->vdev_top_zap = svd->vdev_top_zap;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;
	svd->vdev_top_zap = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;

		/*
		 * If pool not set for autoexpand, we need to also preserve
		 * mvd's asize to prevent automatic expansion of cvd.
		 * Otherwise if we are adjusting the mirror by attaching and
		 * detaching children of non-uniform sizes, the mirror could
		 * autoexpand, unexpectedly requiring larger devices to
		 * re-establish the mirror.
		 */
		if (!cvd->vdev_spa->spa_autoexpand)
			cvd->vdev_asize = mvd->vdev_asize;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the "typical" blocksize.
	 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
	 * otherwise it would inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		uint64_t object = 0;

		if (txg == 0) {
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
		}

		error = metaslab_init(vd->vdev_mg, m, object, txg,
		    &(vd->vdev_ms[m]));
		if (error)
			return (error);
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	/*
	 * If the vdev is being removed we don't activate
	 * the metaslabs since we want to ensure that no new
	 * allocations are performed on this device.
	 */
	if (oldc == 0 && !vd->vdev_removing)
		metaslab_group_activate(vd->vdev_mg);

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}
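
/*
 * Worked example of the deflate ratio (added commentary, not part of the
 * original file): on a plain disk or mirror with ashift 9, a 128K psize
 * maps to a 128K asize, so vdev_deflate_ratio == 131072 / (131072 >> 9)
 * == 512.  On a 5-wide raidz1 with ashift 9, the same 128K consumes
 * roughly 160K once parity is added, giving 131072 / (163840 >> 9) == 409
 * (integer division).  The ratio is later used to convert raw allocated
 * space into the deflated space reported to users.
 */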

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		metaslab_group_passivate(vd->vdev_mg);
		for (m = 0; m < count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp != NULL)
				metaslab_fini(msp);
		}
		vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}

	ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}

typedef struct vdev_probe_stats {
	boolean_t vps_readable;
	boolean_t vps_writeable;
	int vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_abd,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			abd_free(zio->io_abd);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		abd_free(zio->io_abd);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;
		zio_link_t *zl;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = SET_ERROR(ENXIO);
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		zl = NULL;
		while ((pio = zio_walk_parents(zio, &zl)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = SET_ERROR(ENXIO);

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible.
 *
 * Read and write to several known locations: the pad regions of each
 * vdev label but the first, which we leave alone in case it contains
 * a VTOC.
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;
	int l;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		/*
		 * We can't change the vdev state in this context, so we
		 * kick off an async task to do it on our behalf.
		 */
		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
		    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}
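
/*
 * Usage note (added commentary, not part of the original file): the
 * synchronous form is zio_wait(vdev_probe(vd, NULL)), as used by
 * vdev_open() below; the asynchronous form passes a failed zio in, which
 * then becomes a parent of the shared probe zio.  If several in-flight
 * I/Os hit errors on the same leaf, they all attach to one probe rather
 * than each issuing their own; that is the 'probe storm' prevention
 * described above.
 */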

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	int c;

#ifdef _KERNEL
	if (zvol_is_zvol(vd->vdev_path))
		return (B_TRUE);
#endif

	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);

	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;
	int c;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.
	 */
	if (vdev_uses_zvols(vd)) {
retry_sync:
		for (c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
	} else {
		tq = taskq_create("vdev_open", children, minclsyspri,
		    children, children, TASKQ_PREPOPULATE);
		if (tq == NULL)
			goto retry_sync;

		for (c = 0; c < children; c++)
			VERIFY(taskq_dispatch(tq, vdev_open_child,
			    vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);

		taskq_destroy(tq);
	}

	vd->vdev_nonrot = B_TRUE;

	for (c = 0; c < children; c++)
		vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
}

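/*
 * Rationale sketch (added commentary, not part of the original file):
 * opening a zvol-backed leaf (e.g. a vdev sitting on /dev/zd0) re-enters
 * zvol code that needs spa_namespace_lock, which the opening thread may
 * already hold.  A taskq worker would block on that lock, so the
 * retry_sync path above performs the opens serially on the current
 * thread instead; it is also the fallback when taskq_create() fails.
 */
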
/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t max_osize = 0;
	uint64_t asize, max_asize, psize;
	uint64_t ashift = 0;
	int c;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	/*
	 * If this vdev is not removed, check its fault status.  If it's
	 * faulted, bail out of the open.
	 */
	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (SET_ERROR(ENXIO));
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);

	/*
	 * Reset the vdev_reopening flag so that we actually close
	 * the vdev on error.
	 */
	vd->vdev_reopening = B_FALSE;
	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	/*
	 * Recheck the faulted flag now that we have confirmed that
	 * the vdev is accessible.  If we're faulted, bail.
	 */
	if (vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	}

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
		    VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = 0;
		asize = osize;
		max_asize = max_osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk too much.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (SET_ERROR(EINVAL));
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For compatibility, a different ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_max_asize = max_asize;
		if (vd->vdev_ashift == 0) {
			vd->vdev_ashift = ashift; /* use detected value */
		}
		if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
		    vd->vdev_ashift > ASHIFT_MAX)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_ASHIFT);
			return (SET_ERROR(EDOM));
		}
	} else {
		/*
		 * Detect if the alignment requirement has increased.
		 * We don't want to make the pool unavailable, just
		 * post an event instead.
		 */
		if (ashift > vd->vdev_top->vdev_ashift &&
		    vd->vdev_ops->vdev_op_leaf) {
			zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
			    spa, vd, NULL, 0, 0);
		}

		vd->vdev_max_asize = max_asize;
	}

	/*
	 * If all children are healthy we update asize if either:
	 * The asize has increased, due to a device expansion caused by dynamic
	 * LUN growth or vdev replacement, and automatic expansion is enabled;
	 * making the additional space available.
	 *
	 * The asize has decreased, due to a device shrink usually caused by a
	 * vdev replace with a smaller device.  This ensures that calculations
	 * based on max_asize and asize (e.g. esize) are always valid.  It's
	 * safe to do this as we've already validated that asize is greater
	 * than vdev_min_asize.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY &&
	    ((asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand)) ||
	    (asize < vd->vdev_asize)))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (error);
	}

	/*
	 * Track the min and max ashift values for normal data devices.
	 */
	if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
	    !vd->vdev_islog && vd->vdev_aux == NULL) {
		if (vd->vdev_ashift > spa->spa_max_ashift)
			spa->spa_max_ashift = vd->vdev_ashift;
		if (vd->vdev_ashift < spa->spa_min_ashift)
			spa->spa_min_ashift = vd->vdev_ashift;
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}
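
/*
 * Size bookkeeping sketch (added commentary, not part of the original
 * file; constants are from vdev_impl.h of this vintage and worth
 * double-checking): a leaf's usable asize is osize minus
 * VDEV_LABEL_START_SIZE (two 256K labels plus the boot block) and
 * VDEV_LABEL_END_SIZE (two more 256K labels), so a few megabytes of
 * every leaf are reserved for label and boot areas.
 */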

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * If 'strict' is false ignore the spa guid check.  This is necessary because
 * if the machine crashed during a re-guid the new guid might have been written
 * to all of the vdev labels, but not the cached config.  The strict check
 * will be performed when the pool is opened again using the mos config.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd, boolean_t strict)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid = 0, top_guid;
	uint64_t state;
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c], strict) != 0)
			return (SET_ERROR(EBADF));

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
		uint64_t aux_guid = 0;
		nvlist_t *nvl;
		uint64_t txg = spa_last_synced_txg(spa) != 0 ?
		    spa_last_synced_txg(spa) : -1ULL;

		if ((label = vdev_label_read_config(vd, txg)) == NULL) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		/*
		 * Determine if this vdev has been split off into another
		 * pool.  If so, then refuse to open it.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
		    &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_SPLIT_POOL);
			nvlist_free(label);
			return (0);
		}

		if (strict && (nvlist_lookup_uint64(label,
		    ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
		    guid != spa_guid(spa))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
		    != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
		    &aux_guid) != 0)
			aux_guid = 0;

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 *
		 * If we split this vdev off instead, then we also check the
		 * original pool's guid.  We don't want to consider the vdev
		 * corrupt if it is partway through a split operation.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		/*
		 * If this is a verbatim import, no need to check the
		 * state of the pool.
		 */
		if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
		    spa_load_state(spa) == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (SET_ERROR(EBADF));

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;
	ASSERTV(spa_t *spa = vd->vdev_spa);

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	/*
	 * If our parent is reopening, then we are as well, unless we are
	 * going offline.
	 */
	if (pvd != NULL && pvd->vdev_reopening)
		vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_hold(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int c;

	ASSERT(spa_is_root(spa));
	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
		return;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_hold(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_hold(vd);
}

void
vdev_rele(vdev_t *vd)
{
	int c;

	ASSERT(spa_is_root(vd->vdev_spa));
	for (c = 0; c < vd->vdev_children; c++)
		vdev_rele(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_rele(vd);
}

/*
 * Reopen all interior vdevs and any unopened leaves.  We don't actually
 * reopen leaf vdevs which had previously been opened as they might deadlock
 * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
 * If the leaf has never been opened then open it, as usual.
 */
void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	/* set the reopening flag unless we're taking the vdev offline */
	vd->vdev_reopening = !vd->vdev_offline;
	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd))
			l2arc_add_vdev(spa, vd);
	} else {
		(void) vdev_validate(vd, B_TRUE);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively load DTLs and initialize all labels.
	 */
	if ((error = vdev_dtl_load(vd)) != 0 ||
	    (error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly metaslabs_per_vdev (default 200) metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}
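
/*
 * Worked example (added commentary, not part of the original file): for
 * a 2 TiB (2^41 byte) top-level vdev, asize / 200 is about 2^33.4, so
 * highbit64() yields an ms_shift of 34: 128 metaslabs of 16 GiB each.
 * The MAX() with SPA_MAXBLOCKSHIFT keeps tiny vdevs from choosing
 * metaslabs smaller than the largest supported block size.
 */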

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(!vd->vdev_ishole);
	ASSERT(ISP2(flags));
	ASSERT(spa_writeable(vd->vdev_spa));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_dirty_leaves(vd->vdev_child[c], flags, txg);

	if (vd->vdev_ops->vdev_op_leaf)
		vdev_dirty(vd->vdev_top, flags, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in 'maxfaults' or more children;
 * those are the txgs we don't have enough replication to read.  For example,
 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
 * two child DTL_MISSING maps.
 *
 * It should be clear from the above that to compute the DTLs and outage maps
 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
 * Therefore, that is all we keep on disk.  When loading the pool, or after
 * a configuration change, we generate all other DTLs from first principles.
 */
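
/*
 * Illustrative sketch (not part of the original source): the threshold
 * rule above, evaluated for a hypothetical raidz2 whose three children
 * are missing txg ranges [10,30], [20,40] and [25,50].  Only txgs
 * present in nparity + 1 = 3 children -- here [25,30] -- would land in
 * the parent's DTL_MISSING; everything else is still reconstructible.
 */
#if 0	/* example only; counts per-txg membership with plain arrays */
static int
example_raidz2_missing(uint64_t txg)
{
	/* hypothetical child DTL_MISSING ranges, inclusive */
	static const uint64_t lo[3] = { 10, 20, 25 };
	static const uint64_t hi[3] = { 30, 40, 50 };
	int c, refs = 0, minref = 2 + 1;	/* nparity + 1 */

	for (c = 0; c < 3; c++)
		if (txg >= lo[c] && txg <= hi[c])
			refs++;
	return (refs >= minref);	/* true only for txgs 25..30 */
}
#endif
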
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	range_tree_t *rt = vd->vdev_dtl[t];

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
	ASSERT(spa_writeable(vd->vdev_spa));

	mutex_enter(rt->rt_lock);
	if (!range_tree_contains(rt, txg, size))
		range_tree_add(rt, txg, size);
	mutex_exit(rt->rt_lock);
}

boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	range_tree_t *rt = vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(rt->rt_lock);
	if (range_tree_space(rt) != 0)
		dirty = range_tree_contains(rt, txg, size);
	mutex_exit(rt->rt_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	range_tree_t *rt = vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(rt->rt_lock);
	empty = (range_tree_space(rt) == 0);
	mutex_exit(rt->rt_lock);

	return (empty);
}
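
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might record and then query a one-txg gap on a leaf vdev using the
 * three helpers above.  'vd' and 'txg' are assumed to come from the
 * caller with the appropriate locks already held.
 */
#if 0	/* example only */
static void
example_dtl_usage(vdev_t *vd, uint64_t txg)
{
	vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);	/* txg is now a gap */
	ASSERT(vdev_dtl_contains(vd, DTL_MISSING, txg, 1));
	ASSERT(!vdev_dtl_empty(vd, DTL_MISSING));
}
#endif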

/*
 * Returns the lowest txg in the DTL range.
 */
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
	range_seg_t *rs;

	ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
	ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
	ASSERT0(vd->vdev_children);

	rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
	return (rs->rs_start - 1);
}

/*
 * Returns the highest txg in the DTL.
 */
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
	range_seg_t *rs;

	ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
	ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
	ASSERT0(vd->vdev_children);

	rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
	return (rs->rs_end);
}

/*
 * Determine if a resilvering vdev should remove any DTL entries from
 * its range.  If the vdev was resilvering for the entire duration of the
 * scan then it should excise that range from its DTLs.  Otherwise, this
 * vdev is considered partially resilvered and should leave its DTL
 * entries intact.  The comment in vdev_dtl_reassess() describes how we
 * excise the DTLs.
 */
static boolean_t
vdev_dtl_should_excise(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;

	ASSERT0(scn->scn_phys.scn_errors);
	ASSERT0(vd->vdev_children);

	if (vd->vdev_resilver_txg == 0 ||
	    range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
		return (B_TRUE);

	/*
	 * When a resilver is initiated the scan will assign the scn_max_txg
	 * value to the highest txg value that exists in all DTLs.  If this
	 * device's max DTL is not part of this scan (i.e. it is not in
	 * the range (scn_min_txg, scn_max_txg]) then it is not eligible
	 * for excision.
	 */
	if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
		ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
		ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
		ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
		return (B_TRUE);
	}
	return (B_FALSE);
}
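
/*
 * Illustrative sketch (not part of the original source): the eligibility
 * test above with made-up numbers.  Suppose the scan covers
 * (scn_min_txg, scn_max_txg] = (100, 500] and this vdev began
 * resilvering at txg 120.  A DTL whose highest entry is 480 falls
 * inside the scanned range, so the whole range may be excised; a DTL
 * reaching txg 600 extends past the scan and must be kept.
 */
#if 0	/* example only */
static boolean_t
example_excise_eligible(uint64_t dtl_max, uint64_t scn_max_txg)
{
	return (dtl_max <= scn_max_txg);	/* 480 <= 500: excise */
}
#endif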

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
	spa_t *spa = vd->vdev_spa;
	avl_tree_t reftree;
	int c, t, minref;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	for (c = 0; c < vd->vdev_children; c++)
		vdev_dtl_reassess(vd->vdev_child[c], txg,
		    scrub_txg, scrub_done);

	if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;

		mutex_enter(&vd->vdev_dtl_lock);

		/*
		 * If we've completed a scan cleanly then determine
		 * if this vdev should remove any DTLs.  We only want to
		 * excise regions on vdevs that were available during
		 * the entire duration of this scan.
		 */
		if (scrub_txg != 0 &&
		    (spa->spa_scrub_started ||
		    (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
		    vdev_dtl_should_excise(vd)) {
			/*
			 * We completed a scrub up to scrub_txg.  If we
			 * did it without rebooting, then the scrub dtl
			 * will be valid, so excise the old region and
			 * fold in the scrub dtl.  Otherwise, leave the
			 * dtl as-is if there was an error.
			 *
			 * There's a little trick here: to excise the beginning
			 * of the DTL_MISSING map, we put it into a reference
			 * tree and then add a segment with refcnt -1 that
			 * covers the range [0, scrub_txg).  This means
			 * that each txg in that range has refcnt -1 or 0.
			 * We then add DTL_SCRUB with a refcnt of 2, so that
			 * entries in the range [0, scrub_txg) will have a
			 * positive refcnt -- either 1 or 2.  We then convert
			 * the reference tree into the new DTL_MISSING map.
			 */
			space_reftree_create(&reftree);
			space_reftree_add_map(&reftree,
			    vd->vdev_dtl[DTL_MISSING], 1);
			space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
			space_reftree_add_map(&reftree,
			    vd->vdev_dtl[DTL_SCRUB], 2);
			space_reftree_generate_map(&reftree,
			    vd->vdev_dtl[DTL_MISSING], 1);
			space_reftree_destroy(&reftree);
		}
		range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
		range_tree_walk(vd->vdev_dtl[DTL_MISSING],
		    range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
		if (scrub_done)
			range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
		range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
		if (!vdev_readable(vd))
			range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
		else
			range_tree_walk(vd->vdev_dtl[DTL_MISSING],
			    range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);

		/*
		 * If the vdev was resilvering and no longer has any
		 * DTLs then reset its resilvering flag and dirty
		 * the top level so that we persist the change.
		 */
		if (vd->vdev_resilver_txg != 0 &&
		    range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0 &&
		    range_tree_space(vd->vdev_dtl[DTL_OUTAGE]) == 0) {
			vd->vdev_resilver_txg = 0;
			vdev_config_dirty(vd->vdev_top);
		}

		mutex_exit(&vd->vdev_dtl_lock);

		if (txg != 0)
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
		return;
	}

	mutex_enter(&vd->vdev_dtl_lock);
	for (t = 0; t < DTL_TYPES; t++) {
		int c;

		/* account for child's outage in parent's missing map */
		int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
		if (t == DTL_SCRUB)
			continue;			/* leaf vdevs only */
		if (t == DTL_PARTIAL)
			minref = 1;			/* i.e. non-zero */
		else if (vd->vdev_nparity != 0)
			minref = vd->vdev_nparity + 1;	/* RAID-Z */
		else
			minref = vd->vdev_children;	/* any kind of mirror */
		space_reftree_create(&reftree);
		for (c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			mutex_enter(&cvd->vdev_dtl_lock);
			space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
			mutex_exit(&cvd->vdev_dtl_lock);
		}
		space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
		space_reftree_destroy(&reftree);
	}
	mutex_exit(&vd->vdev_dtl_lock);
}
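
/*
 * Illustrative sketch (not part of the original source): the refcount
 * arithmetic behind the excision trick documented above, for a single
 * txg t in [0, scrub_txg).  Starting from whether t was missing and
 * whether the scrub failed to repair it, the generated map keeps t only
 * when the net refcount reaches the threshold of 1.
 */
#if 0	/* example only */
static int
example_excise_refcnt(int was_missing, int scrub_left_it)
{
	int refcnt = 0;

	refcnt += was_missing ? 1 : 0;		/* DTL_MISSING: +1 */
	refcnt += -1;				/* [0, scrub_txg) seg: -1 */
	refcnt += scrub_left_it ? 2 : 0;	/* DTL_SCRUB: +2 */
	return (refcnt >= 1);	/* stays missing only if scrub left it */
}
#endif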

int
vdev_dtl_load(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	int error = 0;
	int c;

	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
		ASSERT(!vd->vdev_ishole);

		error = space_map_open(&vd->vdev_dtl_sm, mos,
		    vd->vdev_dtl_object, 0, -1ULL, 0, &vd->vdev_dtl_lock);
		if (error)
			return (error);
		ASSERT(vd->vdev_dtl_sm != NULL);

		mutex_enter(&vd->vdev_dtl_lock);

		/*
		 * Now that we've opened the space_map we need to update
		 * the in-core DTL.
		 */
		space_map_update(vd->vdev_dtl_sm);

		error = space_map_load(vd->vdev_dtl_sm,
		    vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
		mutex_exit(&vd->vdev_dtl_lock);

		return (error);
	}

	for (c = 0; c < vd->vdev_children; c++) {
		error = vdev_dtl_load(vd->vdev_child[c]);
		if (error != 0)
			break;
	}

	return (error);
}

void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;

	VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
	VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
	    zapobj, tx));
}

uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
	    DMU_OT_NONE, 0, tx);

	ASSERT(zap != 0);
	VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
	    zap, tx));

	return (zap);
}

void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
	uint64_t i;

	if (vd->vdev_ops != &vdev_hole_ops &&
	    vd->vdev_ops != &vdev_missing_ops &&
	    vd->vdev_ops != &vdev_root_ops &&
	    !vd->vdev_top->vdev_removing) {
		if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
			vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
		}
		if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
			vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
		}
	}
	for (i = 0; i < vd->vdev_children; i++) {
		vdev_construct_zaps(vd->vdev_child[i], tx);
	}
}
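
/*
 * Illustrative sketch (not part of the original source): calling
 * vdev_construct_zaps() again in a later txg is harmless -- the == 0
 * guards above make it idempotent, so each leaf keeps a single leaf ZAP
 * and each top-level vdev a single top ZAP.
 */
#if 0	/* example only */
static void
example_construct_zaps_twice(vdev_t *rvd, dmu_tx_t *tx)
{
	uint64_t before, after;

	vdev_construct_zaps(rvd, tx);
	before = rvd->vdev_child[0]->vdev_top_zap;

	vdev_construct_zaps(rvd, tx);		/* second call is a no-op */
	after = rvd->vdev_child[0]->vdev_top_zap;

	ASSERT3U(before, ==, after);
}
#endif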

void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
	objset_t *mos = spa->spa_meta_objset;
	range_tree_t *rtsync;
	kmutex_t rtlock;
	dmu_tx_t *tx;
	uint64_t object = space_map_object(vd->vdev_dtl_sm);

	ASSERT(!vd->vdev_ishole);
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
		mutex_enter(&vd->vdev_dtl_lock);
		space_map_free(vd->vdev_dtl_sm, tx);
		space_map_close(vd->vdev_dtl_sm);
		vd->vdev_dtl_sm = NULL;
		mutex_exit(&vd->vdev_dtl_lock);

		/*
		 * We only destroy the leaf ZAP for detached leaves or for
		 * removed log devices.  Removed data devices handle leaf ZAP
		 * cleanup later, once cancellation is no longer possible.
		 */
		if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
		    vd->vdev_top->vdev_islog)) {
			vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
			vd->vdev_leaf_zap = 0;
		}

		dmu_tx_commit(tx);
		return;
	}

	if (vd->vdev_dtl_sm == NULL) {
		uint64_t new_object;

		new_object = space_map_alloc(mos, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
		    0, -1ULL, 0, &vd->vdev_dtl_lock));
		ASSERT(vd->vdev_dtl_sm != NULL);
	}

	mutex_init(&rtlock, NULL, MUTEX_DEFAULT, NULL);

	rtsync = range_tree_create(NULL, NULL, &rtlock);

	mutex_enter(&rtlock);

	mutex_enter(&vd->vdev_dtl_lock);
	range_tree_walk(rt, range_tree_add, rtsync);
	mutex_exit(&vd->vdev_dtl_lock);

	space_map_truncate(vd->vdev_dtl_sm, tx);
	space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx);
	range_tree_vacate(rtsync, NULL, NULL);

	range_tree_destroy(rtsync);

	mutex_exit(&rtlock);
	mutex_destroy(&rtlock);

	/*
	 * If the object for the space map has changed then dirty
	 * the top level so that we update the config.
	 */
	if (object != space_map_object(vd->vdev_dtl_sm)) {
		zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
		    "new object %llu", txg, spa_name(spa), object,
		    space_map_object(vd->vdev_dtl_sm));
		vdev_config_dirty(vd->vdev_top);
	}

	dmu_tx_commit(tx);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_update(vd->vdev_dtl_sm);
	mutex_exit(&vd->vdev_dtl_lock);
}
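
/*
 * Illustrative sketch (not part of the original source): the
 * snapshot-then-write pattern used above.  The in-core DTL is copied
 * into a private tree while vdev_dtl_lock is held, and the slower
 * on-disk space-map write then runs against the copy, so the live DTL
 * is never held locked across I/O.
 */
#if 0	/* example only; 'copy' is a caller-provided empty range tree */
static void
example_snapshot_then_write(vdev_t *vd, range_tree_t *copy, dmu_tx_t *tx)
{
	mutex_enter(&vd->vdev_dtl_lock);	/* short critical section */
	range_tree_walk(vd->vdev_dtl[DTL_MISSING], range_tree_add, copy);
	mutex_exit(&vd->vdev_dtl_lock);

	space_map_write(vd->vdev_dtl_sm, copy, SM_ALLOC, tx);	/* unlocked */
}
#endif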

/*
 * Determine whether the specified vdev can be offlined/detached/removed
 * without losing data.
 */
boolean_t
vdev_dtl_required(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *tvd = vd->vdev_top;
	uint8_t cant_read = vd->vdev_cant_read;
	boolean_t required;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (vd == spa->spa_root_vdev || vd == tvd)
		return (B_TRUE);

	/*
	 * Temporarily mark the device as unreadable, and then determine
	 * whether this results in any DTL outages in the top-level vdev.
	 * If not, we can safely offline/detach/remove the device.
	 */
	vd->vdev_cant_read = B_TRUE;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
	vd->vdev_cant_read = cant_read;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);

	if (!required && zio_injection_enabled)
		required = !!zio_handle_device_injection(vd, NULL, ECHILD);

	return (required);
}

/*
 * Determine if resilver is needed, and if so the txg range.
 */
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
	boolean_t needed = B_FALSE;
	uint64_t thismin = UINT64_MAX;
	uint64_t thismax = 0;
	int c;

	if (vd->vdev_children == 0) {
		mutex_enter(&vd->vdev_dtl_lock);
		if (range_tree_space(vd->vdev_dtl[DTL_MISSING]) != 0 &&
		    vdev_writeable(vd)) {
			thismin = vdev_dtl_min(vd);
			thismax = vdev_dtl_max(vd);
			needed = B_TRUE;
		}
		mutex_exit(&vd->vdev_dtl_lock);
	} else {
		for (c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			uint64_t cmin, cmax;

			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
				thismin = MIN(thismin, cmin);
				thismax = MAX(thismax, cmax);
				needed = B_TRUE;
			}
		}
	}

	if (needed && minp) {
		*minp = thismin;
		*maxp = thismax;
	}
	return (needed);
}

void
vdev_load(vdev_t *vd)
{
	int c;

	/*
	 * Recursively load all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_load(vd->vdev_child[c]);

	/*
	 * If this is a top-level vdev, initialize its metaslabs.
	 */
	if (vd == vd->vdev_top && !vd->vdev_ishole &&
	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
	    vdev_metaslab_init(vd, 0) != 0))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);

	/*
	 * If this is a leaf vdev, load its DTL.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
}

/*
 * The special vdev case is used for hot spares and l2cache devices.  Its
 * sole purpose is to set the vdev state for the associated vdev.  To do this,
 * we make sure that we can open the underlying device, then try to read the
 * label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_aux(vdev_t *vd)
{
	nvlist_t *label;
	uint64_t guid, version;
	uint64_t state;

	if (!vdev_readable(vd))
		return (0);

	if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		return (-1);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
	    !SPA_VERSION_IS_SUPPORTED(version) ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
	    guid != vd->vdev_guid ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		return (-1);
	}

	/*
	 * We don't actually check the pool state here.  If it's in fact in
	 * use by another pool, we update this fact on the fly when requested.
	 */
	nvlist_free(label);
	return (0);
}

void
vdev_remove(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	dmu_tx_t *tx;
	int m, i;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
	ASSERT(vd == vd->vdev_top);
	ASSERT3U(txg, ==, spa_syncing_txg(spa));

	if (vd->vdev_ms != NULL) {
		metaslab_group_t *mg = vd->vdev_mg;

		metaslab_group_histogram_verify(mg);
		metaslab_class_histogram_verify(mg->mg_class);

		for (m = 0; m < vd->vdev_ms_count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp == NULL || msp->ms_sm == NULL)
				continue;

			mutex_enter(&msp->ms_lock);
			/*
			 * If the metaslab was not loaded when the vdev
			 * was removed then the histogram accounting may
			 * not be accurate.  Update the histogram information
			 * here so that we ensure that the metaslab group
			 * and metaslab class are up-to-date.
			 */
			metaslab_group_histogram_remove(mg, msp);

			VERIFY0(space_map_allocated(msp->ms_sm));
			space_map_free(msp->ms_sm, tx);
			space_map_close(msp->ms_sm);
			msp->ms_sm = NULL;
			mutex_exit(&msp->ms_lock);
		}

		metaslab_group_histogram_verify(mg);
		metaslab_class_histogram_verify(mg->mg_class);
		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			ASSERT0(mg->mg_histogram[i]);
	}

	if (vd->vdev_ms_array) {
		(void) dmu_object_free(mos, vd->vdev_ms_array, tx);
		vd->vdev_ms_array = 0;
	}

	if (vd->vdev_islog && vd->vdev_top_zap != 0) {
		vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
		vd->vdev_top_zap = 0;
	}
	dmu_tx_commit(tx);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;
	boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));

	ASSERT(!vd->vdev_ishole);

	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
		metaslab_sync_done(msp, txg);

	if (reassess)
		metaslab_sync_reassess(vd->vdev_mg);
}

void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *lvd;
	metaslab_t *msp;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
		ASSERT(vd == vd->vdev_top);
		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
		ASSERT(vd->vdev_ms_array != 0);
		vdev_config_dirty(vd);
		dmu_tx_commit(tx);
	}

	/*
	 * Remove the metadata associated with this vdev once it's empty.
	 */
	if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
		vdev_remove(vd, txg);

	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
		metaslab_sync(msp, txg);
		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
	}

	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
		vdev_dtl_sync(lvd, txg);

	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
	return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
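
/*
 * Illustrative sketch (not part of the original source): what the
 * per-ops asize callback typically yields.  For a plain disk or mirror,
 * vdev_default_asize() just rounds psize up to the top-level ashift;
 * for RAID-Z the result must also cover parity, so it grows by roughly
 * ndisks / (ndisks - nparity).  The numbers below assume a hypothetical
 * 6-disk raidz2 with ashift = 12 and ignore raidz skip-sector padding.
 */
#if 0	/* example only; approximate */
static uint64_t
example_raidz_asize_estimate(uint64_t psize)
{
	uint64_t ashift = 12, ndisks = 6, nparity = 2;
	uint64_t asize = P2ROUNDUP(psize, 1ULL << ashift);

	return (P2ROUNDUP(asize * ndisks / (ndisks - nparity),
	    1ULL << ashift));	/* 128K psize -> 192K asize */
}
#endif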

/*
 * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
 * not be opened, and no I/O is attempted.
 */
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
	vdev_t *vd, *tvd;

	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	tvd = vd->vdev_top;

	/*
	 * We don't directly use the aux state here, but if we do a
	 * vdev_reopen(), we need this value to be present to remember why we
	 * were faulted.
	 */
	vd->vdev_label_aux = aux;

	/*
	 * Faulted state takes precedence over degraded.
	 */
	vd->vdev_delayed_close = B_FALSE;
	vd->vdev_faulted = 1ULL;
	vd->vdev_degraded = 0ULL;
	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);

	/*
	 * If this device has the only valid copy of the data, then
	 * back off and simply mark the vdev as degraded instead.
	 */
	if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
		vd->vdev_degraded = 1ULL;
		vd->vdev_faulted = 0ULL;

		/*
		 * If we reopen the device and it's not dead, only then do we
		 * mark it degraded.
		 */
		vdev_reopen(tvd);

		if (vdev_readable(vd))
			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
	}

	return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
 * user that something is wrong.  The vdev continues to operate as normal as far
 * as I/O is concerned.
 */
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/*
	 * If the vdev is already faulted, then don't do anything.
	 */
	if (vd->vdev_faulted || vd->vdev_degraded)
		return (spa_vdev_state_exit(spa, NULL, 0));

	vd->vdev_degraded = 1ULL;
	if (!vdev_is_dead(vd))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
		    aux);

	return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Online the given vdev.
 *
 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things.  First, any attached
 * spare device should be detached when the device finishes resilvering.
 * Second, the online should be treated like a 'test' online case, so no FMA
 * events are generated if the device fails to open.
 */
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
	vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
	boolean_t wasoffline;
	vdev_state_t oldstate;

	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
	oldstate = vd->vdev_state;

	tvd = vd->vdev_top;
	vd->vdev_offline = B_FALSE;
	vd->vdev_tmpoffline = B_FALSE;
	vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
	vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);

	/* XXX - L2ARC 1.0 does not support expansion */
	if (!vd->vdev_aux) {
		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
			pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
	}

	vdev_reopen(tvd);
	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;

	if (!vd->vdev_aux) {
		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
			pvd->vdev_expanding = B_FALSE;
	}

	if (newstate)
		*newstate = vd->vdev_state;
	if ((flags & ZFS_ONLINE_UNSPARE) &&
	    !vdev_is_dead(vd) && vd->vdev_parent &&
	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_parent->vdev_child[0] == vd)
		vd->vdev_unspare = B_TRUE;

	if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
		/* XXX - L2ARC 1.0 does not support expansion */
		if (vd->vdev_aux)
			return (spa_vdev_state_exit(spa, vd, ENOTSUP));
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	if (wasoffline ||
	    (oldstate < VDEV_STATE_DEGRADED &&
	    vd->vdev_state >= VDEV_STATE_DEGRADED))
		spa_event_notify(spa, vd, ESC_ZFS_VDEV_ONLINE);

	return (spa_vdev_state_exit(spa, vd, 0));
}

static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
	vdev_t *vd, *tvd;
	int error = 0;
	uint64_t generation;
	metaslab_group_t *mg;

top:
	spa_vdev_state_enter(spa, SCL_ALLOC);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	tvd = vd->vdev_top;
	mg = tvd->vdev_mg;
	generation = spa->spa_config_generation + 1;

	/*
	 * If the device isn't already offline, try to offline it.
	 */
	if (!vd->vdev_offline) {
		/*
		 * If this device has the only valid copy of some data,
		 * don't allow it to be offlined.  Log devices are always
		 * expendable.
		 */
		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
		    vdev_dtl_required(vd))
			return (spa_vdev_state_exit(spa, NULL, EBUSY));

		/*
		 * If the top-level is a slog and it has had allocations
		 * then proceed.  We check that the vdev's metaslab group
		 * is not NULL since it's possible that we may have just
		 * added this vdev but not yet initialized its metaslabs.
		 */
		if (tvd->vdev_islog && mg != NULL) {
			/*
			 * Prevent any future allocations.
			 */
			metaslab_group_passivate(mg);
			(void) spa_vdev_state_exit(spa, vd, 0);

			error = spa_offline_log(spa);

			spa_vdev_state_enter(spa, SCL_ALLOC);

			/*
			 * Check to see if the config has changed.
			 */
			if (error || generation != spa->spa_config_generation) {
				metaslab_group_activate(mg);
				if (error)
					return (spa_vdev_state_exit(spa,
					    vd, error));
				(void) spa_vdev_state_exit(spa, vd, 0);
				goto top;
			}
			ASSERT0(tvd->vdev_stat.vs_alloc);
		}

		/*
		 * Offline this device and reopen its top-level vdev.
		 * If the top-level vdev is a log device then just offline
		 * it.  Otherwise, if this action results in the top-level
		 * vdev becoming unusable, undo it and fail the request.
		 */
		vd->vdev_offline = B_TRUE;
		vdev_reopen(tvd);

		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
		    vdev_is_dead(tvd)) {
			vd->vdev_offline = B_FALSE;
			vdev_reopen(tvd);
			return (spa_vdev_state_exit(spa, NULL, EBUSY));
		}

		/*
		 * Add the device back into the metaslab rotor so that
		 * once we online the device it's open for business.
		 */
		if (tvd->vdev_islog && mg != NULL)
			metaslab_group_activate(mg);
	}

	vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);

	return (spa_vdev_state_exit(spa, vd, 0));
}

int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
	int error;

	mutex_enter(&spa->spa_vdev_top_lock);
	error = vdev_offline_locked(spa, guid, flags);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Clear the error counts associated with this vdev.  Unlike vdev_online() and
 * vdev_offline(), we assume the spa config is locked.  We also clear all
 * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
 */
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (vd == NULL)
		vd = rvd;

	vd->vdev_stat.vs_read_errors = 0;
	vd->vdev_stat.vs_write_errors = 0;
	vd->vdev_stat.vs_checksum_errors = 0;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_clear(spa, vd->vdev_child[c]);

	/*
	 * If we're in the FAULTED state or have experienced failed I/O, then
	 * clear the persistent state and attempt to reopen the device.  We
	 * also mark the vdev config dirty, so that the new faulted state is
	 * written out to disk.
	 */
	if (vd->vdev_faulted || vd->vdev_degraded ||
	    !vdev_readable(vd) || !vdev_writeable(vd)) {

		/*
		 * When reopening in response to a clear event, it may be due to
		 * an fmadm repair request.  In this case, if the device is
		 * still broken, we want to still post the ereport again.
		 */
		vd->vdev_forcefault = B_TRUE;

		vd->vdev_faulted = vd->vdev_degraded = 0ULL;
		vd->vdev_cant_read = B_FALSE;
		vd->vdev_cant_write = B_FALSE;

		vdev_reopen(vd == rvd ? rvd : vd->vdev_top);

		vd->vdev_forcefault = B_FALSE;

		if (vd != rvd && vdev_writeable(vd->vdev_top))
			vdev_state_dirty(vd->vdev_top);

		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
			spa_async_request(spa, SPA_ASYNC_RESILVER);

		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
	}

	/*
	 * When clearing a FMA-diagnosed fault, we always want to
	 * unspare the device, as we assume that the original spare was
	 * done in response to the FMA fault.
	 */
	if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_parent->vdev_child[0] == vd)
		vd->vdev_unspare = B_TRUE;
}

boolean_t
vdev_is_dead(vdev_t *vd)
{
	/*
	 * Holes and missing devices are always considered "dead".
	 * This simplifies the code since we don't have to check for
	 * these types of devices in the various code paths.
	 * Instead we rely on the fact that we skip over dead devices
	 * before issuing I/O to them.
	 */
	return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
	    vd->vdev_ops == &vdev_missing_ops);
}

boolean_t
vdev_readable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}

boolean_t
vdev_writeable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
}

boolean_t
vdev_allocatable(vdev_t *vd)
{
	uint64_t state = vd->vdev_state;

	/*
	 * We currently allow allocations from vdevs which may be in the
	 * process of reopening (i.e. VDEV_STATE_CLOSED).  If the device
	 * fails to reopen then we'll catch it later when we're holding
	 * the proper locks.  Note that we have to get the vdev state
	 * in a local variable because although it changes atomically,
	 * we're asking two separate questions about it.
	 */
	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
	    !vd->vdev_cant_write && !vd->vdev_ishole &&
	    vd->vdev_mg->mg_initialized);
}

boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
	ASSERT(zio->io_vd == vd);

	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
		return (B_FALSE);

	if (zio->io_type == ZIO_TYPE_READ)
		return (!vd->vdev_cant_read);

	if (zio->io_type == ZIO_TYPE_WRITE)
		return (!vd->vdev_cant_write);

	return (B_TRUE);
}

static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
	int t;
	for (t = 0; t < ZIO_TYPES; t++) {
		vs->vs_ops[t] += cvs->vs_ops[t];
		vs->vs_bytes[t] += cvs->vs_bytes[t];
	}

	cvs->vs_scan_removing = cvd->vdev_removing;
}

/*
 * Get extended stats
 */
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
	int t, b;
	for (t = 0; t < ZIO_TYPES; t++) {
		for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
			vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];

		for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
			vsx->vsx_total_histo[t][b] +=
			    cvsx->vsx_total_histo[t][b];
		}
	}

	for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
		for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
			vsx->vsx_queue_histo[t][b] +=
			    cvsx->vsx_queue_histo[t][b];
		}
		vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
		vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];

		for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
			vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];

		for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
			vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
	}
}

/*
 * Get statistics for the given vdev.
 */
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
	int c, t;
	/*
	 * If we're getting stats on the root vdev, aggregate the I/O counts
	 * over all top-level vdevs (i.e. the direct children of the root).
	 */
	if (!vd->vdev_ops->vdev_op_leaf) {
		if (vs) {
			memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
			memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
		}
		if (vsx)
			memset(vsx, 0, sizeof (*vsx));

		for (c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			vdev_stat_t *cvs = &cvd->vdev_stat;
			vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;

			vdev_get_stats_ex_impl(cvd, cvs, cvsx);
			if (vs)
				vdev_get_child_stat(cvd, vs, cvs);
			if (vsx)
				vdev_get_child_stat_ex(cvd, vsx, cvsx);
		}
	} else {
		/*
		 * We're a leaf.  Just copy our ZIO active queue stats in.  The
		 * other leaf stats are updated in vdev_stat_update().
		 */
		if (!vsx)
			return;

		memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));

		for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
			vsx->vsx_active_queue[t] =
			    vd->vdev_queue.vq_class[t].vqc_active;
			vsx->vsx_pend_queue[t] = avl_numnodes(
			    &vd->vdev_queue.vq_class[t].vqc_queued_tree);
		}
	}
}

void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
	vdev_t *tvd = vd->vdev_top;
	mutex_enter(&vd->vdev_stat_lock);
	if (vs) {
		bcopy(&vd->vdev_stat, vs, sizeof (*vs));
		vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
		vs->vs_state = vd->vdev_state;
		vs->vs_rsize = vdev_get_min_asize(vd);
		if (vd->vdev_ops->vdev_op_leaf)
			vs->vs_rsize += VDEV_LABEL_START_SIZE +
			    VDEV_LABEL_END_SIZE;
		/*
		 * Report expandable space on top-level, non-auxiliary devices
		 * only.  The expandable space is reported in terms of metaslab
		 * sized units since that determines how much space the pool
		 * can expand.
		 */
		if (vd->vdev_aux == NULL && tvd != NULL) {
			vs->vs_esize = P2ALIGN(
			    vd->vdev_max_asize - vd->vdev_asize,
			    1ULL << tvd->vdev_ms_shift);
		}
		if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
		    !vd->vdev_ishole) {
			vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
		}
	}

	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_READER) != 0);
	vdev_get_stats_ex_impl(vd, vs, vsx);
	mutex_exit(&vd->vdev_stat_lock);
}

void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
	return (vdev_get_stats_ex(vd, vs, NULL));
}
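
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller of the stats interface above.  The config lock must be held,
 * per the ASSERT in vdev_get_stats_ex(); the stack-allocated structs
 * are filled in by the callee.
 */
#if 0	/* example only */
static void
example_dump_root_stats(spa_t *spa)
{
	vdev_stat_t vs;
	vdev_stat_ex_t vsx;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
	vdev_get_stats_ex(spa->spa_root_vdev, &vs, &vsx);
	spa_config_exit(spa, SCL_ALL, FTAG);

	zfs_dbgmsg("read ops %llu, read bytes %llu",
	    (u_longlong_t)vs.vs_ops[ZIO_TYPE_READ],
	    (u_longlong_t)vs.vs_bytes[ZIO_TYPE_READ]);
}
#endif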

void
vdev_clear_stats(vdev_t *vd)
{
	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_space = 0;
	vd->vdev_stat.vs_dspace = 0;
	vd->vdev_stat.vs_alloc = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

void
vdev_scan_stat_init(vdev_t *vd)
{
	vdev_stat_t *vs = &vd->vdev_stat;
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_scan_stat_init(vd->vdev_child[c]);

	mutex_enter(&vd->vdev_stat_lock);
	vs->vs_scan_processed = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
	spa_t *spa = zio->io_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
	vdev_t *pvd;
	uint64_t txg = zio->io_txg;
	vdev_stat_t *vs = &vd->vdev_stat;
	vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
	zio_type_t type = zio->io_type;
	int flags = zio->io_flags;

	/*
	 * If this i/o is a gang leader, it didn't do any actual work.
	 */
	if (zio->io_gang_tree)
		return;

	if (zio->io_error == 0) {
		/*
		 * If this is a root i/o, don't count it -- we've already
		 * counted the top-level vdevs, and vdev_get_stats() will
		 * aggregate them when asked.  This reduces contention on
		 * the root vdev_stat_lock and implicitly handles blocks
		 * that compress away to holes, for which there is no i/o.
		 * (Holes never create vdev children, so all the counters
		 * remain zero, which is what we want.)
		 *
		 * Note: this only applies to successful i/o (io_error == 0)
		 * because unlike i/o counts, errors are not additive.
		 * When reading a ditto block, for example, failure of
		 * one top-level vdev does not imply a root-level error.
		 */
		if (vd == rvd)
			return;

		ASSERT(vd == zio->io_vd);

		if (flags & ZIO_FLAG_IO_BYPASS)
			return;

		mutex_enter(&vd->vdev_stat_lock);

		if (flags & ZIO_FLAG_IO_REPAIR) {
			if (flags & ZIO_FLAG_SCAN_THREAD) {
				dsl_scan_phys_t *scn_phys =
				    &spa->spa_dsl_pool->dp_scan->scn_phys;
				uint64_t *processed = &scn_phys->scn_processed;

				/* XXX cleanup? */
				if (vd->vdev_ops->vdev_op_leaf)
					atomic_add_64(processed, psize);
				vs->vs_scan_processed += psize;
			}

			if (flags & ZIO_FLAG_SELF_HEAL)
				vs->vs_self_healed += psize;
		}

		/*
		 * The bytes/ops/histograms are recorded at the leaf level and
		 * aggregated into the higher level vdevs in vdev_get_stats().
		 */
		if (vd->vdev_ops->vdev_op_leaf &&
		    (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
			vs->vs_ops[type]++;
			vs->vs_bytes[type] += psize;

			if (flags & ZIO_FLAG_DELEGATED) {
				vsx->vsx_agg_histo[zio->io_priority]
				    [RQ_HISTO(zio->io_size)]++;
			} else {
				vsx->vsx_ind_histo[zio->io_priority]
				    [RQ_HISTO(zio->io_size)]++;
			}

			if (zio->io_delta && zio->io_delay) {
				vsx->vsx_queue_histo[zio->io_priority]
				    [L_HISTO(zio->io_delta - zio->io_delay)]++;
				vsx->vsx_disk_histo[type]
				    [L_HISTO(zio->io_delay)]++;
				vsx->vsx_total_histo[type]
				    [L_HISTO(zio->io_delta)]++;
			}
		}

		mutex_exit(&vd->vdev_stat_lock);
		return;
	}

	if (flags & ZIO_FLAG_SPECULATIVE)
		return;

	/*
	 * If this is an I/O error that is going to be retried, then ignore the
	 * error.  Otherwise, the user may interpret B_FAILFAST I/O errors as
	 * hard errors, when in reality they can happen for any number of
	 * innocuous reasons (bus resets, MPxIO link failure, etc).
	 */
	if (zio->io_error == EIO &&
	    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
		return;

	/*
	 * Intent log writes won't propagate their error to the root
	 * I/O so don't mark these types of failures as pool-level
	 * errors.
	 */
	if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		return;

	mutex_enter(&vd->vdev_stat_lock);
	if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
		if (zio->io_error == ECKSUM)
			vs->vs_checksum_errors++;
		else
			vs->vs_read_errors++;
	}
	if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
		vs->vs_write_errors++;
	mutex_exit(&vd->vdev_stat_lock);

	if (type == ZIO_TYPE_WRITE && txg != 0 &&
	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
	    (flags & ZIO_FLAG_SCAN_THREAD) ||
	    spa->spa_claiming)) {
		/*
		 * This is either a normal write (not a repair), or it's
		 * a repair induced by the scrub thread, or it's a repair
		 * made by zil_claim() during spa_load() in the first txg.
		 * In the normal case, we commit the DTL change in the same
		 * txg as the block was born.  In the scrub-induced repair
		 * case, we know that scrubs run in first-pass syncing context,
		 * so we commit the DTL change in spa_syncing_txg(spa).
		 * In the zil_claim() case, we commit in spa_first_txg(spa).
		 *
		 * We currently do not make DTL entries for failed spontaneous
		 * self-healing writes triggered by normal (non-scrubbing)
		 * reads, because we have no transactional context in which to
		 * do so -- and it's not clear that it'd be desirable anyway.
		 */
		if (vd->vdev_ops->vdev_op_leaf) {
			uint64_t commit_txg = txg;
			if (flags & ZIO_FLAG_SCAN_THREAD) {
				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
				ASSERT(spa_sync_pass(spa) == 1);
				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
				commit_txg = spa_syncing_txg(spa);
			} else if (spa->spa_claiming) {
				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
				commit_txg = spa_first_txg(spa);
			}
			ASSERT(commit_txg >= spa_syncing_txg(spa));
			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
				return;
			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
		}
		if (vd != rvd)
			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
	}
}
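
/*
 * Illustrative sketch (not part of the original source): the commit-txg
 * selection performed for failed writes above, pulled out as a pure
 * function.  Normal writes commit the DTL change at the block's birth
 * txg; scrub-thread repairs commit at spa_syncing_txg(); zil_claim()
 * repairs during import commit at spa_first_txg().
 */
#if 0	/* example only */
static uint64_t
example_dtl_commit_txg(spa_t *spa, int flags, uint64_t birth_txg)
{
	if (flags & ZIO_FLAG_SCAN_THREAD)
		return (spa_syncing_txg(spa));	/* scrub-induced repair */
	if (spa->spa_claiming)
		return (spa_first_txg(spa));	/* zil_claim() repair */
	return (birth_txg);			/* ordinary write */
}
#endif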

/*
 * Update the in-core space usage stats for this vdev, its metaslab class,
 * and the root vdev.
 */
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
    int64_t space_delta)
{
	int64_t dspace_delta = space_delta;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	metaslab_group_t *mg = vd->vdev_mg;
	metaslab_class_t *mc = mg ? mg->mg_class : NULL;

	ASSERT(vd == vd->vdev_top);

	/*
	 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
	 * factor.  We must calculate this here and not at the root vdev
	 * because the root vdev's psize-to-asize is simply the max of its
	 * children's, thus not accurate enough for us.
	 */
	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
	ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
	    vd->vdev_deflate_ratio;

	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_alloc += alloc_delta;
	vd->vdev_stat.vs_space += space_delta;
	vd->vdev_stat.vs_dspace += dspace_delta;
	mutex_exit(&vd->vdev_stat_lock);

	if (mc == spa_normal_class(spa)) {
		mutex_enter(&rvd->vdev_stat_lock);
		rvd->vdev_stat.vs_alloc += alloc_delta;
		rvd->vdev_stat.vs_space += space_delta;
		rvd->vdev_stat.vs_dspace += dspace_delta;
		mutex_exit(&rvd->vdev_stat_lock);
	}

	if (mc != NULL) {
		ASSERT(rvd == vd->vdev_parent);
		ASSERT(vd->vdev_ms_count != 0);

		metaslab_class_space_update(mc,
		    alloc_delta, defer_delta, space_delta, dspace_delta);
	}
}
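
/*
 * Illustrative sketch (not part of the original source): the deflate
 * arithmetic above with made-up numbers.  The deflate ratio is psize
 * per asize expressed in SPA_MINBLOCKSIZE (512-byte) units: 512 for a
 * plain disk or mirror (no expansion), and roughly 341 for a
 * hypothetical 6-wide raidz2, so a 1 MiB raw delta deflates to about
 * (1048576 >> 9) * 341 = 698368 bytes of user-visible space.
 */
#if 0	/* example only; ratio value assumes the 6-disk raidz2 above */
static int64_t
example_deflated_delta(int64_t space_delta)
{
	int64_t deflate_ratio = 341;	/* hypothetical raidz2 value */

	return ((space_delta >> SPA_MINBLOCKSHIFT) * deflate_ratio);
}
#endif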

/*
 * Mark a top-level vdev's config as dirty, placing it on the dirty list
 * so that it will be written out next time the vdev configuration is synced.
 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
 */
void
vdev_config_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_writeable(spa));

	/*
	 * If this is an aux vdev (as with l2cache and spare devices), then we
	 * update the vdev config manually and set the sync flag.
	 */
	if (vd->vdev_aux != NULL) {
		spa_aux_vdev_t *sav = vd->vdev_aux;
		nvlist_t **aux;
		uint_t naux;

		for (c = 0; c < sav->sav_count; c++) {
			if (sav->sav_vdevs[c] == vd)
				break;
		}

		if (c == sav->sav_count) {
			/*
			 * We're being removed.  There's nothing more to do.
			 */
			ASSERT(sav->sav_sync == B_TRUE);
			return;
		}

		sav->sav_sync = B_TRUE;

		if (nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
		}

		ASSERT(c < naux);

		/*
		 * Setting the nvlist in the middle of the array is a little
		 * sketchy, but it will work.
		 */
		nvlist_free(aux[c]);
		aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);

		return;
	}

	/*
	 * The dirty list is protected by the SCL_CONFIG lock.  The caller
	 * must either hold SCL_CONFIG as writer, or must be the sync thread
	 * (which holds SCL_CONFIG as reader).  There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_CONFIG, RW_READER)));

	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_config_dirty(rvd->vdev_child[c]);
	} else {
		ASSERT(vd == vd->vdev_top);

		if (!list_link_active(&vd->vdev_config_dirty_node) &&
		    !vd->vdev_ishole)
			list_insert_head(&spa->spa_config_dirty_list, vd);
	}
}

void
vdev_config_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_CONFIG, RW_READER)));

	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
	list_remove(&spa->spa_config_dirty_list, vd);
}

/*
 * Mark a top-level vdev's state as dirty, so that the next pass of
 * spa_sync() can convert this into vdev_config_dirty().  We distinguish
 * the state changes from larger config changes because they require
 * much less locking, and are often needed for administrative actions.
 */
void
vdev_state_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_writeable(spa));
	ASSERT(vd == vd->vdev_top);

	/*
	 * The state list is protected by the SCL_STATE lock.  The caller
	 * must either hold SCL_STATE as writer, or must be the sync thread
	 * (which holds SCL_STATE as reader).  There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_STATE, RW_READER)));

	if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
		list_insert_head(&spa->spa_state_dirty_list, vd);
}

void
vdev_state_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_STATE, RW_READER)));

	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
	list_remove(&spa->spa_state_dirty_list, vd);
}

/*
 * Propagate vdev state up from children to parent.
 */
void
vdev_propagate_state(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int degraded = 0, faulted = 0;
	int corrupted = 0;
	vdev_t *child;
	int c;

	if (vd->vdev_children > 0) {
		for (c = 0; c < vd->vdev_children; c++) {
			child = vd->vdev_child[c];

			/*
			 * Don't factor holes into the decision.
			 */
			if (child->vdev_ishole)
				continue;

			if (!vdev_readable(child) ||
			    (!vdev_writeable(child) && spa_writeable(spa))) {
				/*
				 * Root special: if there is a top-level log
				 * device, treat the root vdev as if it were
				 * degraded.
				 */
				if (child->vdev_islog && vd == rvd)
					degraded++;
				else
					faulted++;
			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
				degraded++;
			}

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

		/*
		 * Root special: if there is a top-level vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == rvd &&
		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}

	if (vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}
3394
3395/*
3396 * Set a vdev's state. If this is during an open, we don't update the parent
3397 * state, because we're in the process of opening children depth-first.
3398 * Otherwise, we propagate the change to the parent.
3399 *
3400 * If this routine places a device in a faulted state, an appropriate ereport is
3401 * generated.
3402 */
3403void
3404vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
3405{
3406 uint64_t save_state;
b128c09f 3407 spa_t *spa = vd->vdev_spa;
34dc7c2f
BB
3408
3409 if (state == vd->vdev_state) {
976246fa
DB
3410 /*
3411 * Since vdev_offline() code path is already in an offline
3412 * state we can miss a statechange event to OFFLINE. Check
3413 * the previous state to catch this condition.
3414 */
3415 if (vd->vdev_ops->vdev_op_leaf &&
3416 (state == VDEV_STATE_OFFLINE) &&
3417 (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
3418 /* post an offline state change */
3419 zfs_post_state_change(spa, vd, vd->vdev_prevstate);
3420 }
34dc7c2f
BB
3421 vd->vdev_stat.vs_aux = aux;
3422 return;
3423 }
3424
3425 save_state = vd->vdev_state;
3426
3427 vd->vdev_state = state;
3428 vd->vdev_stat.vs_aux = aux;
3429
3430 /*
3431 * If we are setting the vdev state to anything but an open state, then
428870ff
BB
3432 * always close the underlying device unless the device has requested
3433 * a delayed close (i.e. we're about to remove or fault the device).
3434 * Otherwise, we keep accessible but invalid devices open forever.
3435 * We don't call vdev_close() itself, because that implies some extra
3436 * checks (offline, etc) that we don't want here. This is limited to
3437 * leaf devices, because otherwise closing the device will affect other
3438 * children.
34dc7c2f 3439 */
428870ff
BB
3440 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
3441 vd->vdev_ops->vdev_op_leaf)
34dc7c2f
BB
3442 vd->vdev_ops->vdev_op_close(vd);
3443
	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then
		 * this device was previously marked removed and someone
		 * attempted to reopen it.  If this failed due to a
		 * nonexistent device, then keep the device in the REMOVED
		 * state.  We also leave the device in the REMOVED state if
		 * this is one of our special test online cases, which is
		 * only attempting to online the device and shouldn't
		 * generate an FMA fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import or recovery, we
		 * mark it as "not available", which signifies that it was
		 * never there to begin with.  Failure to open such a device
		 * is not considered an error.
		 */
		if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
		    spa_load_state(spa) == SPA_LOAD_RECOVER) &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport.  If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen().  In this case, we
		 * don't want to post the ereport if the device was already in
		 * the CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event.  If we
		 * hit this case, then we have detected an insertion event for
		 * a faulted or offline device that wasn't in the removed
		 * state.  In this scenario, we don't post an ereport because
		 * we are about to replace the device, or attempt an online
		 * with vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			case VDEV_AUX_BAD_ASHIFT:
				class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	/*
	 * Notify ZED of any significant state change on a leaf vdev.
	 */
	if (vd->vdev_ops->vdev_op_leaf) {
		/* preserve original state from a vdev_reopen() */
		if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
		    (vd->vdev_prevstate != vd->vdev_state) &&
		    (save_state <= VDEV_STATE_CLOSED))
			save_state = vd->vdev_prevstate;

		/* filter out state change due to initial vdev_open */
		if (save_state > VDEV_STATE_CLOSED)
			zfs_post_state_change(spa, vd, save_state);
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}

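/*
 * Illustrative example (not taken from this file's callers): a caller
 * outside vdev_open() that fails to reopen a device might record that
 * with
 *
 *	vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
 *	    VDEV_AUX_OPEN_FAILED);
 *
 * which, subject to the filters above, closes the leaf device, may post
 * an FM_EREPORT_ZFS_DEVICE_OPEN_FAILED ereport, and propagates the new
 * state to the parents; vdev_open() itself passes isopen == B_TRUE so
 * parent state is settled only after the depth-first open completes.
 */
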
/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool.  We do not support partial configurations.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		const char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
			return (B_FALSE);
	}

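	/*
	 * Recurse over all children: a configuration is bootable only if
	 * every vdev in the tree is bootable.
	 */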
	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Load the state from the original vdev tree (ovd) which
 * we've retrieved from the MOS config object.  If the original
 * vdev was offline or faulted then we transfer that state to the
 * device in the current vdev tree (nvd).
 */
9babb374 3574void
428870ff 3575vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
9babb374 3576{
d6320ddb 3577 int c;
9babb374 3578
572e2857 3579 ASSERT(nvd->vdev_top->vdev_islog);
1fde1e37
BB
3580 ASSERT(spa_config_held(nvd->vdev_spa,
3581 SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
428870ff 3582 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
9babb374 3583
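	/*
	 * Note: the recursion below assumes nvd and ovd describe the same
	 * tree shape (the per-vdev GUID check above provides a partial
	 * sanity check), so each nvd child is paired with the ovd child
	 * at the same index.
	 */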
	for (c = 0; c < nvd->vdev_children; c++)
		vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);

	if (nvd->vdev_ops->vdev_op_leaf) {
		/*
		 * Restore the persistent vdev state.
		 */
		nvd->vdev_offline = ovd->vdev_offline;
		nvd->vdev_faulted = ovd->vdev_faulted;
		nvd->vdev_degraded = ovd->vdev_degraded;
		nvd->vdev_removed = ovd->vdev_removed;
	}
}

/*
 * Determine if a log device has valid content.  If the vdev was
 * removed or faulted in the MOS config then we know that
 * the content on the log device has already been written to the pool.
 */
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
	int c;

	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
	    !vd->vdev_removed)
		return (B_TRUE);

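	/*
	 * Otherwise, the log state is valid if any descendant leaf still
	 * holds valid state.
	 */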
	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_log_state_valid(vd->vdev_child[c]))
			return (B_TRUE);

	return (B_FALSE);
}

/*
 * Expand a vdev if possible.
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

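	/*
	 * The device has grown if its asize now spans more metaslabs
	 * (asize >> ms_shift) than are currently allocated; initialize
	 * the new metaslabs and dirty the config so the change persists.
	 */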
	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}

/*
 * Split a vdev.
 */
void
vdev_split(vdev_t *vd)
{
	vdev_t *cvd, *pvd = vd->vdev_parent;

	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

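	/*
	 * If removing vd left only a single child (e.g. one side of a
	 * two-way mirror), collapse the now-redundant parent and mark
	 * the survivor as splitting.
	 */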
	cvd = pvd->vdev_child[0];
	if (pvd->vdev_children == 1) {
		vdev_remove_parent(cvd);
		cvd->vdev_splitting = B_TRUE;
	}
	vdev_propagate_state(cvd);
}
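/*
 * Walk the vdev tree checking, for each leaf, whether the I/O at the
 * head of its active queue has been outstanding for longer than the
 * deadman threshold.  This is expected to be driven periodically from
 * the spa-level deadman logic.
 */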
void
vdev_deadman(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		vdev_deadman(cvd);
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		vdev_queue_t *vq = &vd->vdev_queue;

		mutex_enter(&vq->vq_lock);
		if (avl_numnodes(&vq->vq_active_tree) > 0) {
			spa_t *spa = vd->vdev_spa;
			zio_t *fio;
			uint64_t delta;

			/*
			 * Check the I/O at the head of the active tree; if
			 * it has been outstanding for longer than
			 * spa_deadman_synctime, log a zevent.
			 */
			fio = avl_first(&vq->vq_active_tree);
			delta = gethrtime() - fio->io_timestamp;
			if (delta > spa_deadman_synctime(spa)) {
				zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
				    "delta %lluns, last io %lluns",
				    fio->io_timestamp, delta,
				    vq->vq_io_complete_ts);
				zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
				    spa, vd, fio, 0, 0);
			}
		}
		mutex_exit(&vq->vq_lock);
	}
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
/* BEGIN CSTYLED */
module_param(metaslabs_per_vdev, int, 0644);
MODULE_PARM_DESC(metaslabs_per_vdev,
	"Divide added vdev into approximately (but no more than) this number "
	"of metaslabs");
/* END CSTYLED */
#endif
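
/*
 * Illustrative usage (not part of the source): as a standard Linux
 * module parameter, metaslabs_per_vdev can be set at load time, e.g.
 *
 *	modprobe zfs metaslabs_per_vdev=200
 *
 * or adjusted at runtime (mode 0644) via
 * /sys/module/zfs/parameters/metaslabs_per_vdev.  Since the tunable is
 * consulted when a vdev is added, changing it affects only vdevs added
 * afterwards.
 */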