/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>

#ifdef _KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type.  When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq.  Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros.  Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput.  Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention.  The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case,
 * the particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },	/* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_BATCH,	ZTI_NULL },	/* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(16),	ZTI_N(5) },	/* WRITE */
	{ ZTI_P(4, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },	/* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },	/* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },	/* IOCTL */
};
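
/*
 * Reading the table above (an illustrative expansion, not new mechanism):
 * each entry is { mode, value, count } per the zio_taskq_info_t definition.
 * For example, the FREE/ISSUE entry ZTI_P(4, 8) expands to
 * { ZTI_MODE_FIXED, 4, 8 }, i.e. eight discrete taskqs of four threads
 * each, while the WRITE/ISSUE entry ZTI_BATCH expands to
 * { ZTI_MODE_BATCH, 0, 1 }, a single throughput-oriented taskq whose
 * thread count is derived from zio_taskq_batch_pct.  See spa_taskqs_init()
 * below for how these values are consumed.
 */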

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
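
/*
 * The resulting nvlist shape is one nested nvlist per property, e.g.
 * (a sketch using the literal ZPROP_SOURCE/ZPROP_VALUE key names):
 *
 *	"capacity" -> { "source" = src, "value" = 42 }
 *
 * where "value" holds either a string or a uint64, matching the
 * strval/intval logic above.
 */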

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools before this version freedir will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    pool->dp_free_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
	if (err)
		return (err);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		goto out;
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if ((err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds))) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
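
/*
 * Caller usage sketch: on success spa_prop_get() hands back a freshly
 * allocated nvlist which the caller owns and must release:
 *
 *	nvlist_t *props = NULL;
 *	if (spa_prop_get(spa, &props) == 0) {
 *		... consume props ...
 *		nvlist_free(props);
 *	}
 *
 * On any error other than ENOENT the nvlist has already been freed and
 * *nvp reset to NULL above.
 */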

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch ((int)prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable.
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				error = dmu_objset_hold(strval, FTAG, &os);
				if (error)
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed.  This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked).  We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = SET_ERROR(E2BIG);
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;

		default:
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver, 6);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6));
	}

	return (0);
}
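
/*
 * Usage sketch (an assumed caller, e.g. the zfs_ioc_pool_set_props ioctl
 * path): the incoming nvlist simply maps property names to pending values:
 *
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_COMMENT), "backup pool") == 0);
 *	error = spa_prop_set(spa, props);
 *
 * spa_prop_validate() runs first and may rewrite entries in place; in
 * particular a bootfs dataset name is replaced by its object number before
 * spa_sync_props() ever sees it.
 */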

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;
	ASSERTV(uint64_t *newguid = arg);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
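
/*
 * Note: this is the in-kernel backend for "zpool reguid".  The check/sync
 * pair above runs under dsl_sync_task(), so the GUID swap commits
 * atomically with the transaction group; spa_config_sync() then pushes
 * the new GUID out to the cachefile.
 */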

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t i, flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}
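
/*
 * Taskq naming example (a sketch; zio_type_name[] is defined in zio.c):
 * assuming a type name such as "z_wr" and the "iss" queue type, a
 * WRITE/ISSUE entry with count == 1 yields a taskq named "z_wr_iss",
 * while a multi-taskq entry yields "z_wr_iss_0", "z_wr_iss_1", ...
 * per the snprintf() calls above.
 */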

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	uint_t i;

	if (tqs->stqs_taskq == NULL) {
		ASSERT3U(tqs->stqs_count, ==, 0);
		return;
	}

	for (i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.  In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}
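
/*
 * Note: taskq_dispatch_ent() consumes a caller-supplied taskq_ent_t
 * (typically pre-allocated and embedded in the zio being dispatched), so
 * this path performs no memory allocation on the hot I/O dispatch path.
 */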

/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;
	taskqid_t id;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	id = taskq_dispatch(tq, func, arg, flags);
	if (id)
		taskq_wait_id(tq, id);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	int t, q;

	for (t = 0; t < ZIO_TYPES; t++) {
		for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef HAVE_SPA_THREAD
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif /* HAVE_SPA_THREAD */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t, q;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);

	for (t = 0; t < ZIO_TYPES; t++) {
		for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.
 * This will create all the necessary vdevs in the appropriate layout, with
 * each vdev in the CLOSED state.  This will prep the pool before
 * open/creation/import.  All vdev validation is done by the vdev_alloc()
 * routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;
	int c;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
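
/*
 * For example (a sketch), a config nvlist describing a mirrored pool:
 *
 *	"vdev_tree" : {
 *		"type" : "root",
 *		"children" : [
 *			{ "type" : "mirror",
 *			  "children" : [
 *				{ "type" : "disk", "path" : "/dev/sda1" },
 *				{ "type" : "disk", "path" : "/dev/sdb1" } ] }
 *		]
 *	}
 *
 * parses into a three-level vdev tree via the recursion above, with the
 * leaf "disk" vdevs returning early at the vdev_op_leaf check.
 */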

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares
 * for this pool.  When this is called, we have some form of basic information
 * in 'spa_spares.sav_config'.  We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different
	 * vdev_t structures associated with it: one in the list of spares
	 * (used only for basic validation purposes) and one in the active
	 * vdev configuration (if it's spared in).  During this phase we open
	 * and validate each vdev on the spare list.  If the vdev also exists
	 * in the active configuration, then we also mark this vdev as an
	 * active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were
			 * successfully able to load the vdev.  Otherwise,
			 * importing a pool with a bad active spare would
			 * result in strange behavior, because multiple pools
			 * would think the spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra
			 * complexity it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool.  When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error)
		return (error);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = vmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	vmem_free(packed, nvsize);

	return (error);
}
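
/*
 * On-disk layout handled here: the object's bonus buffer holds a single
 * uint64_t giving the packed size, and the object's data blocks hold the
 * packed nvlist stream itself (produced by nvlist_pack() on the write
 * side), which nvlist_unpack() reinflates above.
 */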

/*
 * Checks to see if the given vdev could not be opened, in which case we post
 * a sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_ereport_post(FM_EREPORT_RESOURCE_AUTOREPLACE,
		    vd->vdev_spa, vd, NULL, 0, 0);
		spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_CHECK);
	}
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv;
	int c, i;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing devices in this config.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			vdev_t *mtvd = mrvd->vdev_child[c];

			if (tvd->vdev_ops == &vdev_missing_ops &&
			    mtvd->vdev_ops != &vdev_missing_ops &&
			    mtvd->vdev_islog)
				child[idx++] = vdev_config_generate(spa, mtvd,
				    B_FALSE, 0);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

			for (i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));
	}

	/*
	 * Compare the root vdev tree with the information we have
	 * from the MOS config (mrvd).  Check each top-level vdev
	 * with the corresponding MOS config top-level (mtvd).
	 */
	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		vdev_t *mtvd = mrvd->vdev_child[c];

		/*
		 * Resolve any "missing" vdevs in the current configuration.
		 * If we find that the MOS config has more accurate information
		 * about the top-level vdev then use that vdev instead.
		 */
		if (tvd->vdev_ops == &vdev_missing_ops &&
		    mtvd->vdev_ops != &vdev_missing_ops) {

			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
				continue;

			/*
			 * Device specific actions.
			 */
			if (mtvd->vdev_islog) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
			} else {
				/*
				 * XXX - once we have 'readonly' pool
				 * support we should be able to handle
				 * missing data devices by transitioning
				 * the pool to readonly.
				 */
				continue;
			}

			/*
			 * Swap the missing vdev with the data we were
			 * able to obtain from the MOS config.
			 */
			vdev_remove_child(rvd, tvd);
			vdev_remove_child(mrvd, mtvd);

			vdev_add_child(rvd, mtvd);
			vdev_add_child(mrvd, tvd);

			spa_config_exit(spa, SCL_ALL, FTAG);
			vdev_load(mtvd);
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

			vdev_reopen(rvd);
		} else if (mtvd->vdev_islog) {
			/*
			 * Load the slog device's state from the MOS config
			 * since it's possible that the label does not
			 * contain the most up-to-date information.
			 */
			vdev_load_log_state(tvd, mtvd);
			vdev_reopen(tvd);
		}
	}
	vdev_free(mrvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Ensure we were able to validate the config.
	 */
	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;

	switch (spa->spa_log_state) {
	default:
		break;
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
		    NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;
	int c;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_offline_log(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	int i;

	for (i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}
34dc7c2f 1847
typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;
	spa_t *spa = zio->io_spa;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_add_64(&sle->sle_meta_count, 1);
		else
			atomic_add_64(&sle->sle_data_count, 1);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

/*
 * Maximum number of concurrent scrub I/Os to issue while verifying
 * a pool during import.
 */
int spa_load_verify_maxinflight = 10000;
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;

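/*
 * A note on tuning: in Linux builds these knobs are typically exported
 * as module parameters (a sketch of that wiring, assuming the standard
 * module_param() interface; the actual declarations live elsewhere in
 * this file):
 *
 *	module_param(spa_load_verify_maxinflight, int, 0644);
 *	MODULE_PARM_DESC(spa_load_verify_maxinflight,
 *	    "Max concurrent traversal I/Os while verifying pool");
 *
 * which allows runtime adjustment, e.g. to skip data verification
 * during a slow import:
 *
 *	echo 0 > /sys/module/zfs/parameters/spa_load_verify_data
 */
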
/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	zio_t *rio;
	size_t size;
	void *data;

	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);
	/*
	 * Note: normally this routine will not be called if
	 * spa_load_verify_metadata is not set.  However, it may be useful
	 * to manually set the flag after the traversal has begun.
	 */
	if (!spa_load_verify_metadata)
		return (0);
	if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
		return (0);

	rio = arg;
	size = BP_GET_PSIZE(bp);
	data = zio_data_buf_alloc(size);

	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	spa->spa_scrub_inflight++;
	mutex_exit(&spa->spa_scrub_lock);

	zio_nowait(zio_read(rio, spa, bp, data, size,
	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	return (0);
}

static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error = 0;

	zpool_get_rewind_policy(spa->spa_config, &policy);

	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
		return (0);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	if (spa_load_verify_metadata) {
		error = traverse_pool(spa, spa->spa_verify_min_txg,
		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
		    spa_load_verify_cb, rio);
	}

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}

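/*
 * The rewind policy consumed by spa_load_verify() arrives as a nested
 * nvlist stored in spa_config under ZPOOL_REWIND_POLICY.  A minimal
 * userland sketch of constructing one (illustrative only; "max_txg" is
 * a hypothetical caller-supplied value):
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_DO_REWIND);
 *	fnvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg);
 *	fnvlist_add_nvlist(config, ZPOOL_REWIND_POLICY, policy);
 *	fnvlist_free(policy);
 */
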
/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, sizeof (uint64_t), 1, val));
}

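/*
 * Callers below use spa_dir_prop() with a common idiom: ENOENT simply
 * means the pool predates the directory entry and is tolerated, while
 * any other error is treated as corruption.  Sketch:
 *
 *	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
 *	if (error != 0 && error != ENOENT)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 */
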
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
	return (err);
}

/*
 * Fix up config after a partly-completed split.  This is done with the
 * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
 * pool have that entry in their config, but only the splitting one contains
 * a list of all the guids of the vdevs that are being split off.
 *
 * This function determines what to do with that list: either rejoin
 * all the disks to the pool, or complete the splitting process.  To attempt
 * the rejoin, each disk that is offlined is marked online again, and
 * we do a reopen() call.  If the vdev label for every disk that was
 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
 * then we call vdev_split() on each disk, and complete the split.
 *
 * Otherwise we leave the config alone, with all the vdevs in place in
 * the original pool.
 */
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
	uint_t extracted;
	uint64_t *glist;
	uint_t i, gcount;
	nvlist_t *nvl;
	vdev_t **vd;
	boolean_t attempt_reopen;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
		return;

	/* check that the config is complete */
	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    &glist, &gcount) != 0)
		return;

	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);

	/* attempt to online all the vdevs & validate */
	attempt_reopen = B_TRUE;
	for (i = 0; i < gcount; i++) {
		if (glist[i] == 0)	/* vdev is hole */
			continue;

		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
		if (vd[i] == NULL) {
			/*
			 * Don't bother attempting to reopen the disks;
			 * just do the split.
			 */
			attempt_reopen = B_FALSE;
		} else {
			/* attempt to re-online it */
			vd[i]->vdev_offline = B_FALSE;
		}
	}

	if (attempt_reopen) {
		vdev_reopen(spa->spa_root_vdev);

		/* check each device to see what state it's in */
		for (extracted = 0, i = 0; i < gcount; i++) {
			if (vd[i] != NULL &&
			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
				break;
			++extracted;
		}
	}

	/*
	 * If every disk has been moved to the new pool, or if we never
	 * even attempted to look at them, then we split them off for
	 * good.
	 */
	if (!attempt_reopen || gcount == extracted) {
		for (i = 0; i < gcount; i++)
			if (vd[i] != NULL)
				vdev_split(vd[i]);
		vdev_reopen(spa->spa_root_vdev);
	}

	kmem_free(vd, gcount * sizeof (vdev_t *));
}

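/*
 * For reference, the nvlist consumed above has this rough shape in the
 * splitting pool's config (guids are illustrative; a zero guid marks a
 * hole vdev):
 *
 *	ZPOOL_CONFIG_SPLIT:
 *		ZPOOL_CONFIG_SPLIT_LIST = [ 0x1a2b..., 0x0, 0x4d5e... ]
 */
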
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
    boolean_t mosconfig)
{
	nvlist_t *config = spa->spa_config;
	char *ereport = FM_EREPORT_ZFS_POOL;
	char *comment;
	int error;
	uint64_t pool_guid;
	nvlist_t *nvl;

	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
		return (SET_ERROR(EINVAL));

	ASSERT(spa->spa_comment == NULL);
	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
		spa->spa_comment = spa_strdup(comment);

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &spa->spa_ubsync.ub_version) != 0)
		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = SET_ERROR(EEXIST);
	} else {
		spa->spa_config_guid = pool_guid;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
		    &nvl) == 0) {
			VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
			    KM_SLEEP) == 0);
		}

		nvlist_free(spa->spa_load_info);
		spa->spa_load_info = fnvlist_alloc();

		gethrestime(&spa->spa_loaded_ts);
		error = spa_load_impl(spa, pool_guid, config, state, type,
		    mosconfig, &ereport);
	}

	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error) {
		if (error != EEXIST) {
			spa->spa_loaded_ts.tv_sec = 0;
			spa->spa_loaded_ts.tv_nsec = 0;
		}
		if (error != EBADF) {
			zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
		}
	}
	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
__attribute__((always_inline))
static inline int
spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	nvlist_t *label;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t children, config_cache_txg = spa->spa_config_txg;
	int orig_mode = spa->spa_mode;
	int parse, i;
	uint64_t obj;
	boolean_t missing_feat_write = B_FALSE;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
		return (SET_ERROR(EINVAL));

	parse = (type == SPA_IMPORT_EXISTING ?
	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < max_ncpus; i++) {
		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);
	}

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		return (error);

	ASSERT(spa->spa_root_vdev == rvd);

	if (type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_guid(spa) == pool_guid);
	}

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		return (error);

	/*
	 * We need to validate the vdev labels against the configuration that
	 * we have in hand, which is dependent on the setting of mosconfig.  If
	 * mosconfig is true then we're validating the vdev labels based on
	 * that config.  Otherwise, we're validating against the cached config
	 * (zpool.cache) that was read when we loaded the zfs module, and then
	 * later we will recursively call spa_load() and validate against
	 * the vdev config.
	 *
	 * If we're assembling a new pool that's been split off from an
	 * existing pool, the labels haven't yet been updated so we skip
	 * validation for now.
	 */
	if (type != SPA_IMPORT_ASSEMBLE) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		error = vdev_validate(rvd, mosconfig);
		spa_config_exit(spa, SCL_ALL, FTAG);

		if (error != 0)
			return (error);

		if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
			return (SET_ERROR(ENXIO));
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(rvd, ub, &label);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		nvlist_free(label);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
	}

	/*
	 * If the pool has an unsupported version we can't open it.
	 */
	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
		nvlist_free(label);
		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
	}

	if (ub->ub_version >= SPA_VERSION_FEATURES) {
		nvlist_t *features;

		/*
		 * If we weren't able to find what's necessary for reading the
		 * MOS in the label, return failure.
		 */
		if (label == NULL || nvlist_lookup_nvlist(label,
		    ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
			nvlist_free(label);
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
			    ENXIO));
		}

		/*
		 * Update our in-core representation with the definitive values
		 * from the label.
		 */
		nvlist_free(spa->spa_label_features);
		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
	}

	nvlist_free(label);

	/*
	 * Look through entries in the label nvlist's features_for_read. If
	 * there is a feature listed there which we don't understand then we
	 * cannot open a pool.
	 */
	if (ub->ub_version >= SPA_VERSION_FEATURES) {
		nvlist_t *unsup_feat;
		nvpair_t *nvp;

		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
		    0);

		for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
		    nvp != NULL;
		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
			if (!zfeature_is_supported(nvpair_name(nvp))) {
				VERIFY(nvlist_add_string(unsup_feat,
				    nvpair_name(nvp), "") == 0);
			}
		}

		if (!nvlist_empty(unsup_feat)) {
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
			nvlist_free(unsup_feat);
			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
			    ENOTSUP));
		}

		nvlist_free(unsup_feat);
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.  We first check to see if the pool
	 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN).
	 * If it is, defer the vdev_guid_sum check till later so we
	 * can handle missing vdevs.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
	    &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
	    rvd->vdev_guid_sum != ub->ub_guid_sum)
		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));

	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_try_repair(spa, config);
		spa_config_exit(spa, SCL_ALL, FTAG);
		nvlist_free(spa->spa_config_splitting);
		spa->spa_config_splitting = NULL;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
	spa->spa_claim_max_txg = spa->spa_first_txg;
	spa->spa_prev_software_version = ub->ub_software_version;

	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
		boolean_t missing_feat_read = B_FALSE;
		nvlist_t *unsup_feat, *enabled_feat;
		spa_feature_t i;

		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
		    &spa->spa_feat_for_read_obj) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
		    &spa->spa_feat_for_write_obj) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
		    &spa->spa_feat_desc_obj) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		enabled_feat = fnvlist_alloc();
		unsup_feat = fnvlist_alloc();

		if (!spa_features_check(spa, B_FALSE,
		    unsup_feat, enabled_feat))
			missing_feat_read = B_TRUE;

		if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
			if (!spa_features_check(spa, B_TRUE,
			    unsup_feat, enabled_feat)) {
				missing_feat_write = B_TRUE;
			}
		}

		fnvlist_add_nvlist(spa->spa_load_info,
		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);

		if (!nvlist_empty(unsup_feat)) {
			fnvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
		}

		fnvlist_free(enabled_feat);
		fnvlist_free(unsup_feat);

		if (!missing_feat_read) {
			fnvlist_add_boolean(spa->spa_load_info,
			    ZPOOL_CONFIG_CAN_RDONLY);
		}

		/*
		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
		 * twofold: to determine whether the pool is available for
		 * import in read-write mode and (if it is not) whether the
		 * pool is available for import in read-only mode. If the pool
		 * is available for import in read-write mode, it is displayed
		 * as available in userland; if it is not available for import
		 * in read-only mode, it is displayed as unavailable in
		 * userland. If the pool is available for import in read-only
		 * mode but not read-write mode, it is displayed as unavailable
		 * in userland with a special note that the pool is actually
		 * available for open in read-only mode.
		 *
		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
		 * missing a feature for write, we must first determine whether
		 * the pool can be opened read-only before returning to
		 * userland in order to know whether to display the
		 * abovementioned note.
		 */
		if (missing_feat_read || (missing_feat_write &&
		    spa_writeable(spa))) {
			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
			    ENOTSUP));
		}

		/*
		 * Load refcounts for ZFS features from disk into an in-memory
		 * cache during SPA initialization.
		 */
		for (i = 0; i < SPA_FEATURES; i++) {
			uint64_t refcount;

			error = feature_get_refcount_from_disk(spa,
			    &spa_feature_table[i], &refcount);
			if (error == 0) {
				spa->spa_feat_refcount_cache[i] = refcount;
			} else if (error == ENOTSUP) {
				spa->spa_feat_refcount_cache[i] =
				    SPA_FEATURE_DISABLED;
			} else {
				return (spa_vdev_err(rvd,
				    VDEV_AUX_CORRUPT_DATA, EIO));
			}
		}
	}

	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
		    &spa->spa_feat_enabled_txg_obj) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	spa->spa_is_initializing = B_TRUE;
	error = dsl_pool_open(spa->spa_dsl_pool);
	spa->spa_is_initializing = B_FALSE;
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (!mosconfig) {
		uint64_t hostid;
		nvlist_t *policy = NULL, *nvconfig;

		if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(nvconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

#ifdef	_KERNEL
			myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
			/*
			 * We're emulating the system's hostid in userland, so
			 * we can't use zone_get_hostid().
			 */
			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */
			if (hostid != 0 && myhostid != 0 &&
			    hostid != myhostid) {
				nvlist_free(nvconfig);
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by another "
				    "system (host: %s hostid: 0x%lx). See: "
				    "http://zfsonlinux.org/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				return (SET_ERROR(EBADF));
			}
		}
		if (nvlist_lookup_nvlist(spa->spa_config,
		    ZPOOL_REWIND_POLICY, &policy) == 0)
			VERIFY(nvlist_add_nvlist(nvconfig,
			    ZPOOL_REWIND_POLICY, policy) == 0);

		spa_config_set(spa, nvconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
	}

	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
	    &spa->spa_creation_version);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
	    &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * If we're assembling the pool from the split-off vdevs of
	 * an existing pool, we don't want to attach the spares & cache
	 * devices.
	 */

	/*
	 * Load any hot spares for this pool.
	 */
	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	} else if (error == 0) {
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	} else if (error == 0) {
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
	if (error && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (error == 0) {
		uint64_t autoreplace = 0;

		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
		spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
		    &spa->spa_dedup_ditto);

		spa->spa_autoreplace = (autoreplace != 0);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
		spa_check_removed(spa->spa_root_vdev);
		/*
		 * For the import case, this is done in spa_import(), because
		 * at this point we're using the spare definitions from
		 * the MOS config, not necessarily from the userland config.
		 */
		if (state != SPA_LOAD_IMPORT) {
			spa_aux_check_removed(&spa->spa_spares);
			spa_aux_check_removed(&spa->spa_l2cache);
		}
	}

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Load the DDTs (dedup tables).
	 */
	error = ddt_load(spa);
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	spa_update_dspace(spa);

	/*
	 * Validate the config, using the MOS config to fill in any
	 * information which might be missing.  If we fail to validate
	 * the config then declare the pool unfit for use. If we're
	 * assembling a pool from a split, the log is not transferred
	 * over.
	 */
	if (type != SPA_IMPORT_ASSEMBLE) {
		nvlist_t *nvconfig;

		if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		if (!spa_config_valid(spa, nvconfig)) {
			nvlist_free(nvconfig);
			return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
			    ENXIO));
		}
		nvlist_free(nvconfig);

		/*
		 * Now that we've validated the config, check the state of the
		 * root vdev.  If it can't be opened, it indicates one or
		 * more toplevel vdevs are faulted.
		 */
		if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
			return (SET_ERROR(ENXIO));

		if (spa_check_logs(spa)) {
			*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
			return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
		}
	}

	if (missing_feat_write) {
		ASSERT(state == SPA_LOAD_TRYIMPORT);

		/*
		 * At this point, we know that we can open the pool in
		 * read-only mode but not read-write mode. We now have enough
		 * information and can return to userland.
		 */
		return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
	}

	/*
	 * We've successfully opened the pool, verify that we're ready
	 * to start pushing transactions.
	 */
	if (state != SPA_LOAD_TRYIMPORT) {
		if ((error = spa_load_verify(spa)))
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
			    error));
	}

	if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
	    spa->spa_load_max_txg == UINT64_MAX)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
		 * invoked from zil_claim_log_block()'s i/o done callback.
		 * Price of rollback is that we abandon the log.
		 */
		spa->spa_claiming = B_TRUE;

		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_claiming = B_FALSE;

		spa_set_log_state(spa, SPA_LOG_GOOD);
		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.  We sync up to the highest
		 * claimed log block birth time so that claimed log blocks
		 * don't appear to be from the future.  spa_claim_max_txg
		 * will have been set for us by either zil_check_log_chain()
		 * (invoked from spa_check_logs()) or zil_claim() above.
		 */
		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 *
		 * If this is a verbatim import, trust the current
		 * in-core spa_config and update the disk labels.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT ||
		    state == SPA_LOAD_RECOVER ||
		    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
		    vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);

		/*
		 * Log the fact that we booted up (so that we can detect if
		 * we rebooted in the middle of an operation).
		 */
		spa_history_log_version(spa, "open");

		/*
		 * Delete any inconsistent datasets.
		 */
		(void) dmu_objset_find(spa_name(spa),
		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);

		/*
		 * Clean up any stale temporary dataset userrefs.
		 */
		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
	}

	return (0);
}

static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
	int mode = spa->spa_mode;

	spa_unload(spa);
	spa_deactivate(spa);

	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;

	spa_activate(spa, mode);
	spa_async_suspend(spa);

	return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
}

/*
 * If spa_load() fails this function will try loading prior txg's. If
 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
 * function will not rewind the pool and will return the same error as
 * spa_load().
 */
static int
spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
    uint64_t max_request, int rewind_flags)
{
	nvlist_t *loadinfo = NULL;
	nvlist_t *config = NULL;
	int load_error, rewind_error;
	uint64_t safe_rewind_txg;
	uint64_t min_txg;

	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
		spa->spa_load_max_txg = spa->spa_load_txg;
		spa_set_log_state(spa, SPA_LOG_CLEAR);
	} else {
		spa->spa_load_max_txg = max_request;
		if (max_request != UINT64_MAX)
			spa->spa_extreme_rewind = B_TRUE;
	}

	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
	    mosconfig);
	if (load_error == 0)
		return (0);

	if (spa->spa_root_vdev != NULL)
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;

	if (rewind_flags & ZPOOL_NEVER_REWIND) {
		nvlist_free(config);
		return (load_error);
	}

	if (state == SPA_LOAD_RECOVER) {
		/* Price of rolling back is discarding txgs, including log */
		spa_set_log_state(spa, SPA_LOG_CLEAR);
	} else {
		/*
		 * If we aren't rolling back save the load info from our first
		 * import attempt so that we can restore it after attempting
		 * to rewind.
		 */
		loadinfo = spa->spa_load_info;
		spa->spa_load_info = fnvlist_alloc();
	}

	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
	    TXG_INITIAL : safe_rewind_txg;

	/*
	 * Continue as long as we're finding errors, we're still within
	 * the acceptable rewind range, and we're still finding uberblocks.
	 */
	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
		if (spa->spa_load_max_txg < safe_rewind_txg)
			spa->spa_extreme_rewind = B_TRUE;
		rewind_error = spa_load_retry(spa, state, mosconfig);
	}

	spa->spa_extreme_rewind = B_FALSE;
	spa->spa_load_max_txg = UINT64_MAX;

	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
		spa_config_set(spa, config);

	if (state == SPA_LOAD_RECOVER) {
		ASSERT3P(loadinfo, ==, NULL);
		return (rewind_error);
	} else {
		/* Store the rewind info as part of the initial load info */
		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
		    spa->spa_load_info);

		/* Restore the initial load info */
		fnvlist_free(spa->spa_load_info);
		spa->spa_load_info = loadinfo;

		return (load_error);
	}
}

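/*
 * A worked example of the rewind window above (numbers illustrative):
 * with spa_last_ubsync_txg == 1000 and TXG_DEFER_SIZE == 2,
 * safe_rewind_txg is 998, so a plain "zpool import -F" only retries
 * txgs 999 and 998, while ZPOOL_EXTREME_REWIND ("zpool import -X")
 * widens min_txg all the way back to TXG_INITIAL.
 */
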
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of being grabbed from the configuration
 * cache.  For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time as opening the pool, without having to keep around the
 * spa_t in some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
    nvlist_t **config)
{
	spa_t *spa;
	spa_load_state_t state = SPA_LOAD_OPEN;
	int error;
	int locked = B_FALSE;
	int firstopen = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENOENT));
	}

	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
		zpool_rewind_policy_t policy;

		firstopen = B_TRUE;

		zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
		    &policy);
		if (policy.zrp_request & ZPOOL_DO_REWIND)
			state = SPA_LOAD_RECOVER;

		spa_activate(spa, spa_mode_global);

		if (state != SPA_LOAD_RECOVER)
			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;

		error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
		    policy.zrp_request);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(ENOENT));
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_config) {
				VERIFY(nvlist_dup(spa->spa_config, config,
				    KM_SLEEP) == 0);
				VERIFY(nvlist_add_nvlist(*config,
				    ZPOOL_CONFIG_LOAD_INFO,
				    spa->spa_load_info) == 0);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = error;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		}
	}

	spa_open_ref(spa, tag);

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	/*
	 * If we've recovered the pool, pass back any information we
	 * gathered while doing the load.
	 */
	if (state == SPA_LOAD_RECOVER) {
		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
		    spa->spa_load_info) == 0);
	}

	if (locked) {
		spa->spa_last_open_failed = 0;
		spa->spa_last_ubsync_txg = 0;
		spa->spa_load_txg = 0;
		mutex_exit(&spa_namespace_lock);
	}

#ifdef _KERNEL
	if (firstopen)
		zvol_create_minors(spa->spa_name);
#endif

	*spapp = spa;

	return (0);
}

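/*
 * Typical consumer pattern for the open path above (a sketch; error
 * handling beyond the open itself is elided):
 *
 *	spa_t *spa;
 *
 *	if (spa_open("tank", &spa, FTAG) == 0) {
 *		... use spa under the open reference ...
 *		spa_close(spa, FTAG);
 *	}
 */
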
int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
    nvlist_t **config)
{
	return (spa_open_common(name, spapp, tag, policy, config));
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_l2cache.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);
			vdev_get_stats(vd, vs);
		}
	}
}

static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
	zap_cursor_t zc;
	zap_attribute_t za;

	if (spa->spa_feat_for_read_obj != 0) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_feat_for_read_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
			    za.za_num_integers == 1);
			VERIFY0(nvlist_add_uint64(features, za.za_name,
			    za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}

	if (spa->spa_feat_for_write_obj != 0) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_feat_for_write_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
			    za.za_num_integers == 1);
			VERIFY0(nvlist_add_uint64(features, za.za_name,
			    za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}
}

static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
	int i;

	for (i = 0; i < SPA_FEATURES; i++) {
		zfeature_info_t feature = spa_feature_table[i];
		uint64_t refcount;

		if (feature_get_refcount(spa, &feature, &refcount) != 0)
			continue;

		VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
	}
}

/*
 * Store a list of pool features and their reference counts in the
 * config.
 *
 * The first time this is called on a spa, allocate a new nvlist, fetch
 * the pool features and reference counts from disk, then save the list
 * in the spa. In subsequent calls on the same spa use the saved nvlist
 * and refresh its values from the cached reference counts.  This
 * ensures we don't block here on I/O on a suspended pool so 'zpool
 * clear' can resume the pool.
 */
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
	nvlist_t *features = spa->spa_feat_stats;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (features != NULL) {
		spa_feature_stats_from_cache(spa, features);
	} else {
		VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
		spa->spa_feat_stats = features;
		spa_feature_stats_from_disk(spa, features);
	}

	VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	    features));
}

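/*
 * The resulting ZPOOL_CONFIG_FEATURE_STATS entry is a flat nvlist
 * mapping feature guid to refcount, e.g. (values illustrative):
 *
 *	feature_stats:
 *		com.delphix:async_destroy = 0
 *		com.delphix:empty_bpobj = 1
 */
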
int
spa_get_stats(const char *name, nvlist_t **config,
    char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, NULL, config);

	if (spa != NULL) {
		/*
		 * This still leaves a window of inconsistency where the spares
		 * or l2cache devices could change and the config would be
		 * self-inconsistent.
		 */
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		if (*config != NULL) {
			uint64_t loadtimes[2];

			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
			VERIFY(nvlist_add_uint64_array(*config,
			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);

			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_ERRCOUNT,
			    spa_get_errlog_size(spa)) == 0);

			if (spa_suspended(spa))
				VERIFY(nvlist_add_uint64(*config,
				    ZPOOL_CONFIG_SUSPENDED,
				    spa->spa_failmode) == 0);

			spa_add_spares(spa, *config);
			spa_add_l2cache(spa, *config);
			spa_add_feature_stats(spa, *config);
		}
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL) {
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		spa_close(spa, FTAG);
	}

	return (error);
}

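/*
 * Sketch of a typical spa_get_stats() call from the ioctl path
 * (illustrative only; the real caller sizes its buffers differently):
 *
 *	nvlist_t *config;
 *	char altroot[MAXPATHLEN];
 *
 *	if (spa_get_stats("tank", &config, altroot,
 *	    sizeof (altroot)) == 0 && config != NULL)
 *		nvlist_free(config);
 */
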
/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (SET_ERROR(EINVAL));

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (SET_ERROR(ENOTSUP));

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context.  For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = SET_ERROR(ENOTBLK);
			vdev_free(vd);
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
		    pool != 0ULL && l2arc_vdev_present(vd))
			l2arc_remove_vdev(vd);
	}
}

3524/*
3525 * Pool Creation
3526 */
3527int
3528spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
6f1ffb06 3529 nvlist_t *zplprops)
34dc7c2f
BB
3530{
3531 spa_t *spa;
3532 char *altroot = NULL;
3533 vdev_t *rvd;
3534 dsl_pool_t *dp;
3535 dmu_tx_t *tx;
9babb374 3536 int error = 0;
34dc7c2f
BB
3537 uint64_t txg = TXG_INITIAL;
3538 nvlist_t **spares, **l2cache;
3539 uint_t nspares, nl2cache;
428870ff 3540 uint64_t version, obj;
9ae529ec
CS
3541 boolean_t has_features;
3542 nvpair_t *elem;
e022864d 3543 int c, i;
83e9986f
RY
3544 char *poolname;
3545 nvlist_t *nvl;
3546
3547 if (nvlist_lookup_string(props, "tname", &poolname) != 0)
3548 poolname = (char *)pool;
34dc7c2f
BB
3549
3550 /*
3551 * If this pool already exists, return failure.
3552 */
3553 mutex_enter(&spa_namespace_lock);
3554 if (spa_lookup(poolname) != NULL) {
3555 mutex_exit(&spa_namespace_lock);
3556 return (SET_ERROR(EEXIST));
3557 }
3558
3559 /*
3560 * Allocate a new spa_t structure.
3561 */
3562 nvl = fnvlist_alloc();
3563 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
3564 (void) nvlist_lookup_string(props,
3565 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3566 spa = spa_add(poolname, nvl, altroot);
3567 fnvlist_free(nvl);
3568 spa_activate(spa, spa_mode_global);
3569
3570 if (props && (error = spa_prop_validate(spa, props))) {
3571 spa_deactivate(spa);
3572 spa_remove(spa);
3573 mutex_exit(&spa_namespace_lock);
3574 return (error);
3575 }
3576
3577 /*
3578 * Temporary pool names should never be written to disk.
3579 */
3580 if (poolname != pool)
3581 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
3582
3583 has_features = B_FALSE;
3584 for (elem = nvlist_next_nvpair(props, NULL);
3585 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3586 if (zpool_prop_feature(nvpair_name(elem)))
3587 has_features = B_TRUE;
3588 }
3589
3590 if (has_features || nvlist_lookup_uint64(props,
3591 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3592 version = SPA_VERSION;
3593 }
3594 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3595
3596 spa->spa_first_txg = txg;
3597 spa->spa_uberblock.ub_txg = txg - 1;
3598 spa->spa_uberblock.ub_version = version;
3599 spa->spa_ubsync = spa->spa_uberblock;
3600
3601 /*
3602 * Create "The Godfather" zio to hold all async IOs
3603 */
3604 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3605 KM_SLEEP);
3606 for (i = 0; i < max_ncpus; i++) {
3607 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3608 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3609 ZIO_FLAG_GODFATHER);
3610 }
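/*
 * Added note (commentary, not in the original source): one root zio is
 * created per CPU so that async I/Os issued on different CPUs do not all
 * serialize on a single godfather zio's lock; the teardown path is
 * expected to zio_wait() on each of these roots before the pool goes away.
 */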
3611
3612 /*
3613 * Create the root vdev.
3614 */
3615 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3616
3617 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3618
3619 ASSERT(error != 0 || rvd != NULL);
3620 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3621
3622 if (error == 0 && !zfs_allocatable_devs(nvroot))
3623 error = SET_ERROR(EINVAL);
3624
3625 if (error == 0 &&
3626 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3627 (error = spa_validate_aux(spa, nvroot, txg,
3628 VDEV_ALLOC_ADD)) == 0) {
3629 for (c = 0; c < rvd->vdev_children; c++) {
3630 vdev_metaslab_set_size(rvd->vdev_child[c]);
3631 vdev_expand(rvd->vdev_child[c], txg);
3632 }
3633 }
3634
3635 spa_config_exit(spa, SCL_ALL, FTAG);
3636
3637 if (error != 0) {
3638 spa_unload(spa);
3639 spa_deactivate(spa);
3640 spa_remove(spa);
3641 mutex_exit(&spa_namespace_lock);
3642 return (error);
3643 }
3644
3645 /*
3646 * Get the list of spares, if specified.
3647 */
3648 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3649 &spares, &nspares) == 0) {
3650 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3651 KM_SLEEP) == 0);
3652 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3653 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3654 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3655 spa_load_spares(spa);
3656 spa_config_exit(spa, SCL_ALL, FTAG);
3657 spa->spa_spares.sav_sync = B_TRUE;
3658 }
3659
3660 /*
3661 * Get the list of level 2 cache devices, if specified.
3662 */
3663 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3664 &l2cache, &nl2cache) == 0) {
3665 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3666 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3667 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3668 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3669 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3670 spa_load_l2cache(spa);
3671 spa_config_exit(spa, SCL_ALL, FTAG);
3672 spa->spa_l2cache.sav_sync = B_TRUE;
3673 }
3674
3675 spa->spa_is_initializing = B_TRUE;
3676 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3677 spa->spa_meta_objset = dp->dp_meta_objset;
3678 spa->spa_is_initializing = B_FALSE;
3679
3680 /*
3681 * Create DDTs (dedup tables).
3682 */
3683 ddt_create(spa);
3684
3685 spa_update_dspace(spa);
3686
3687 tx = dmu_tx_create_assigned(dp, txg);
3688
3689 /*
3690 * Create the pool config object.
3691 */
3692 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3693 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3694 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3695
3696 if (zap_add(spa->spa_meta_objset,
3697 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3698 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3699 cmn_err(CE_PANIC, "failed to add pool config");
3700 }
3701
3702 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3703 spa_feature_create_zap_objects(spa, tx);
3704
3705 if (zap_add(spa->spa_meta_objset,
3706 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3707 sizeof (uint64_t), 1, &version, tx) != 0) {
3708 cmn_err(CE_PANIC, "failed to add pool version");
3709 }
3710
3711 /* Newly created pools with the right version are always deflated. */
3712 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3713 spa->spa_deflate = TRUE;
3714 if (zap_add(spa->spa_meta_objset,
3715 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3716 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3717 cmn_err(CE_PANIC, "failed to add deflate");
3718 }
3719 }
3720
3721 /*
3722 * Create the deferred-free bpobj. Turn off compression
3723 * because sync-to-convergence takes longer if the blocksize
3724 * keeps changing.
3725 */
3726 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3727 dmu_object_set_compress(spa->spa_meta_objset, obj,
3728 ZIO_COMPRESS_OFF, tx);
3729 if (zap_add(spa->spa_meta_objset,
3730 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3731 sizeof (uint64_t), 1, &obj, tx) != 0) {
3732 cmn_err(CE_PANIC, "failed to add bpobj");
3733 }
3734 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3735 spa->spa_meta_objset, obj));
3736
3737 /*
3738 * Create the pool's history object.
3739 */
3740 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3741 spa_history_create_obj(spa, tx);
3742
3743 /*
3744 * Set pool properties.
3745 */
3746 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3747 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3748 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3749 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3750
3751 if (props != NULL) {
3752 spa_configfile_set(spa, props, B_FALSE);
3753 spa_sync_props(props, tx);
3754 }
3755
3756 dmu_tx_commit(tx);
3757
3758 spa->spa_sync_on = B_TRUE;
3759 txg_sync_start(spa->spa_dsl_pool);
3760
3761 /*
3762 * We explicitly wait for the first transaction to complete so that our
3763 * bean counters are appropriately updated.
3764 */
3765 txg_wait_synced(spa->spa_dsl_pool, txg);
3766
3767 spa_config_sync(spa, B_FALSE, B_TRUE);
3768
3769 spa_history_log_version(spa, "create");
3770
3771 spa->spa_minref = refcount_count(&spa->spa_refcount);
3772
3773 mutex_exit(&spa_namespace_lock);
3774
3775 return (0);
3776}
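/*
 * Illustrative caller sketch (hypothetical, not part of this file): a
 * minimal spa_create() invocation builds a one-disk root vdev tree as an
 * nvlist.  The device path below is an assumption for the example.
 */
#if 0
	nvlist_t *nvroot, *disk;
	int error;

	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sda") == 0);

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	error = spa_create("tank", nvroot, NULL, NULL);	/* props, zplprops */

	nvlist_free(disk);
	nvlist_free(nvroot);
#endif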
3777
3778#ifdef _KERNEL
3779/*
3780 * Get the root pool information from the root disk, then import the root pool
3781 * during system boot.
3782 */
3783extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3784
3785static nvlist_t *
3786spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3787{
3788 nvlist_t *config;
3789 nvlist_t *nvtop, *nvroot;
3790 uint64_t pgid;
3791
3792 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3793 return (NULL);
3794
3795 /*
3796 * Add this top-level vdev to the child array.
3797 */
3798 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3799 &nvtop) == 0);
3800 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3801 &pgid) == 0);
3802 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3803
3804 /*
3805 * Put this pool's top-level vdevs into a root vdev.
3806 */
3807 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3808 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3809 VDEV_TYPE_ROOT) == 0);
3810 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3811 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3812 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3813 &nvtop, 1) == 0);
3814
3815 /*
3816 * Replace the existing vdev_tree with the new root vdev in
3817 * this pool's configuration (remove the old, add the new).
3818 */
3819 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3820 nvlist_free(nvroot);
3821 return (config);
3822}
3823
3824/*
3825 * Walk the vdev tree and see if we can find a device with "better"
3826 * configuration. A configuration is "better" if the label on that
3827 * device has a more recent txg.
3828 */
3829static void
3830spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3831{
3832 int c;
3833
3834 for (c = 0; c < vd->vdev_children; c++)
3835 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3836
3837 if (vd->vdev_ops->vdev_op_leaf) {
3838 nvlist_t *label;
3839 uint64_t label_txg;
3840
3841 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3842 &label) != 0)
3843 return;
3844
3845 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3846 &label_txg) == 0);
3847
3848 /*
3849 * Do we have a better boot device?
3850 */
3851 if (label_txg > *txg) {
3852 *txg = label_txg;
3853 *avd = vd;
3854 }
3855 nvlist_free(label);
3856 }
3857}
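/*
 * Added example (commentary, not in the original source): if the leaves
 * under 'vd' carry label txgs 95, 100 and 103, the recursion above leaves
 * *txg == 103 and *avd pointing at the leaf whose label was written most
 * recently, i.e. the copy that best reflects the pool's last sync.
 */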
3858
3859/*
3860 * Import a root pool.
3861 *
3862 * For x86, devpath_list will consist of the devid and/or physpath name of
3863 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3864 * The GRUB "findroot" command will return the vdev we should boot.
3865 *
3866 * For SPARC, devpath_list consists of the physpath name of the booting
3867 * device, whether the root pool is a single-device pool or a mirrored pool.
3868 * e.g.
3869 * "/pci@1f,0/ide@d/disk@0,0:a"
3870 */
3871int
3872spa_import_rootpool(char *devpath, char *devid)
3873{
3874 spa_t *spa;
3875 vdev_t *rvd, *bvd, *avd = NULL;
3876 nvlist_t *config, *nvtop;
3877 uint64_t guid, txg;
3878 char *pname;
3879 int error;
3880
3881 /*
3882 * Read the label from the boot device and generate a configuration.
3883 */
3884 config = spa_generate_rootconf(devpath, devid, &guid);
3885#if defined(_OBP) && defined(_KERNEL)
3886 if (config == NULL) {
3887 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3888 /* iscsi boot */
3889 get_iscsi_bootpath_phy(devpath);
3890 config = spa_generate_rootconf(devpath, devid, &guid);
3891 }
3892 }
3893#endif
3894 if (config == NULL) {
3895 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
3896 devpath);
3897 return (SET_ERROR(EIO));
3898 }
3899
3900 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3901 &pname) == 0);
3902 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3903
3904 mutex_enter(&spa_namespace_lock);
3905 if ((spa = spa_lookup(pname)) != NULL) {
3906 /*
3907 * Remove the existing root pool from the namespace so that we
3908 * can replace it with the correct config we just read in.
3909 */
3910 spa_remove(spa);
3911 }
3912
3913 spa = spa_add(pname, config, NULL);
3914 spa->spa_is_root = B_TRUE;
3915 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
3916
3917 /*
3918 * Build up a vdev tree based on the boot device's label config.
3919 */
3920 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3921 &nvtop) == 0);
3922 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3923 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3924 VDEV_ALLOC_ROOTPOOL);
3925 spa_config_exit(spa, SCL_ALL, FTAG);
3926 if (error) {
3927 mutex_exit(&spa_namespace_lock);
3928 nvlist_free(config);
3929 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3930 pname);
3931 return (error);
3932 }
3933
3934 /*
3935 * Get the boot vdev.
3936 */
3937 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3938 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3939 (u_longlong_t)guid);
3940 error = SET_ERROR(ENOENT);
3941 goto out;
3942 }
3943
3944 /*
3945 * Determine if there is a better boot device.
3946 */
3947 avd = bvd;
3948 spa_alt_rootvdev(rvd, &avd, &txg);
3949 if (avd != bvd) {
3950 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3951 "try booting from '%s'", avd->vdev_path);
3952 error = SET_ERROR(EINVAL);
3953 goto out;
3954 }
3955
3956 /*
3957 * If the boot device is part of a spare vdev then ensure that
3958 * we're booting off the active spare.
3959 */
3960 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3961 !bvd->vdev_isspare) {
3962 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3963 "try booting from '%s'",
3964 bvd->vdev_parent->
3965 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
3966 error = SET_ERROR(EINVAL);
3967 goto out;
3968 }
3969
3970 error = 0;
3971out:
3972 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3973 vdev_free(rvd);
3974 spa_config_exit(spa, SCL_ALL, FTAG);
3975 mutex_exit(&spa_namespace_lock);
3976
3977 nvlist_free(config);
3978 return (error);
3979}
3980
3981#endif
3982
3983/*
3984 * Import a non-root pool into the system.
3985 */
3986int
3987 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
3988{
3989 spa_t *spa;
3990 char *altroot = NULL;
3991 spa_load_state_t state = SPA_LOAD_IMPORT;
3992 zpool_rewind_policy_t policy;
3993 uint64_t mode = spa_mode_global;
3994 uint64_t readonly = B_FALSE;
3995 int error;
3996 nvlist_t *nvroot;
3997 nvlist_t **spares, **l2cache;
3998 uint_t nspares, nl2cache;
3999
4000 /*
4001 * If a pool with this name exists, return failure.
4002 */
4003 mutex_enter(&spa_namespace_lock);
4004 if (spa_lookup(pool) != NULL) {
4005 mutex_exit(&spa_namespace_lock);
4006 return (SET_ERROR(EEXIST));
4007 }
4008
4009 /*
4010 * Create and initialize the spa structure.
4011 */
4012 (void) nvlist_lookup_string(props,
4013 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4014 (void) nvlist_lookup_uint64(props,
4015 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4016 if (readonly)
4017 mode = FREAD;
4018 spa = spa_add(pool, config, altroot);
4019 spa->spa_import_flags = flags;
4020
4021 /*
4022 * Verbatim import - Take a pool and insert it into the namespace
4023 * as if it had been loaded at boot.
4024 */
4025 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
4026 if (props != NULL)
4027 spa_configfile_set(spa, props, B_FALSE);
4028
4029 spa_config_sync(spa, B_FALSE, B_TRUE);
4030
4031 mutex_exit(&spa_namespace_lock);
4032 return (0);
4033 }
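/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * verbatim import bypasses spa_load() entirely, so it is only safe when
 * the supplied config is already known to be current, e.g.:
 *
 *	error = spa_import("tank", config, NULL, ZFS_IMPORT_VERBATIM);
 */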
4034
4035 spa_activate(spa, mode);
4036
4037 /*
4038 * Don't start async tasks until we know everything is healthy.
4039 */
4040 spa_async_suspend(spa);
4041
4042 zpool_get_rewind_policy(config, &policy);
4043 if (policy.zrp_request & ZPOOL_DO_REWIND)
4044 state = SPA_LOAD_RECOVER;
4045
4046 /*
4047 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
4048 * because the user-supplied config is actually the one to trust when
4049 * doing an import.
4050 */
4051 if (state != SPA_LOAD_RECOVER)
4052 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4053
4054 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
4055 policy.zrp_request);
4056
4057 /*
4058 * Propagate anything learned while loading the pool and pass it
4059 * back to caller (i.e. rewind info, missing devices, etc).
4060 */
4061 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4062 spa->spa_load_info) == 0);
4063
4064 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4065 /*
4066 * Toss any existing sparelist, as it doesn't have any validity
4067 * anymore, and conflicts with spa_has_spare().
4068 */
4069 if (spa->spa_spares.sav_config) {
4070 nvlist_free(spa->spa_spares.sav_config);
4071 spa->spa_spares.sav_config = NULL;
4072 spa_load_spares(spa);
4073 }
4074 if (spa->spa_l2cache.sav_config) {
4075 nvlist_free(spa->spa_l2cache.sav_config);
4076 spa->spa_l2cache.sav_config = NULL;
4077 spa_load_l2cache(spa);
4078 }
4079
4080 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4081 &nvroot) == 0);
4082 if (error == 0)
4083 error = spa_validate_aux(spa, nvroot, -1ULL,
4084 VDEV_ALLOC_SPARE);
4085 if (error == 0)
4086 error = spa_validate_aux(spa, nvroot, -1ULL,
4087 VDEV_ALLOC_L2CACHE);
4088 spa_config_exit(spa, SCL_ALL, FTAG);
4089
4090 if (props != NULL)
4091 spa_configfile_set(spa, props, B_FALSE);
4092
4093 if (error != 0 || (props && spa_writeable(spa) &&
4094 (error = spa_prop_set(spa, props)))) {
4095 spa_unload(spa);
4096 spa_deactivate(spa);
4097 spa_remove(spa);
4098 mutex_exit(&spa_namespace_lock);
4099 return (error);
4100 }
4101
4102 spa_async_resume(spa);
4103
4104 /*
4105 * Override any spares and level 2 cache devices as specified by
4106 * the user, as these may have correct device names/devids, etc.
4107 */
4108 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4109 &spares, &nspares) == 0) {
4110 if (spa->spa_spares.sav_config)
4111 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4112 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4113 else
4114 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
4115 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4116 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4117 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4118 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4119 spa_load_spares(spa);
4120 spa_config_exit(spa, SCL_ALL, FTAG);
4121 spa->spa_spares.sav_sync = B_TRUE;
4122 }
4123 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4124 &l2cache, &nl2cache) == 0) {
4125 if (spa->spa_l2cache.sav_config)
4126 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4127 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4128 else
4129 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4130 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4131 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4132 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4133 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4134 spa_load_l2cache(spa);
4135 spa_config_exit(spa, SCL_ALL, FTAG);
4136 spa->spa_l2cache.sav_sync = B_TRUE;
4137 }
4138
4139 /*
4140 * Check for any removed devices.
4141 */
4142 if (spa->spa_autoreplace) {
4143 spa_aux_check_removed(&spa->spa_spares);
4144 spa_aux_check_removed(&spa->spa_l2cache);
4145 }
4146
4147 if (spa_writeable(spa)) {
4148 /*
4149 * Update the config cache to include the newly-imported pool.
4150 */
4151 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4152 }
4153
4154 /*
4155 * It's possible that the pool was expanded while it was exported.
4156 * We kick off an async task to handle this for us.
34dc7c2f 4157 */
4158 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4159
4160 mutex_exit(&spa_namespace_lock);
4161 spa_history_log_version(spa, "import");
4162
4163#ifdef _KERNEL
4164 zvol_create_minors(pool);
4165#endif
4166
4167 return (0);
4168}
4169
4170nvlist_t *
4171spa_tryimport(nvlist_t *tryconfig)
4172{
4173 nvlist_t *config = NULL;
4174 char *poolname;
4175 spa_t *spa;
4176 uint64_t state;
4177 int error;
4178
4179 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4180 return (NULL);
4181
4182 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4183 return (NULL);
4184
4185 /*
4186 * Create and initialize the spa structure.
4187 */
4188 mutex_enter(&spa_namespace_lock);
4189 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
4190 spa_activate(spa, FREAD);
4191
4192 /*
4193 * Pass off the heavy lifting to spa_load().
4194 * Pass TRUE for mosconfig because the user-supplied config
4195 * is actually the one to trust when doing an import.
4196 */
4197 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4198
4199 /*
4200 * If 'tryconfig' was at least parsable, return the current config.
4201 */
4202 if (spa->spa_root_vdev != NULL) {
4203 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4204 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4205 poolname) == 0);
4206 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4207 state) == 0);
4208 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4209 spa->spa_uberblock.ub_timestamp) == 0);
4210 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4211 spa->spa_load_info) == 0);
4212 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
4213 spa->spa_errata) == 0);
4214
4215 /*
4216 * If the bootfs property exists on this pool then we
4217 * copy it out so that external consumers can tell which
4218 * pools are bootable.
4219 */
4220 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4221 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4222
4223 /*
4224 * We have to play games with the name since the
4225 * pool was opened as TRYIMPORT_NAME.
4226 */
4227 if (dsl_dsobj_to_dsname(spa_name(spa),
4228 spa->spa_bootfs, tmpname) == 0) {
4229 char *cp;
4230 char *dsname;
4231
4232 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4233
4234 cp = strchr(tmpname, '/');
4235 if (cp == NULL) {
4236 (void) strlcpy(dsname, tmpname,
4237 MAXPATHLEN);
4238 } else {
4239 (void) snprintf(dsname, MAXPATHLEN,
4240 "%s/%s", poolname, ++cp);
4241 }
4242 VERIFY(nvlist_add_string(config,
4243 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4244 kmem_free(dsname, MAXPATHLEN);
4245 }
4246 kmem_free(tmpname, MAXPATHLEN);
4247 }
4248
4249 /*
4250 * Add the list of hot spares and level 2 cache devices.
4251 */
4252 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4253 spa_add_spares(spa, config);
4254 spa_add_l2cache(spa, config);
4255 spa_config_exit(spa, SCL_CONFIG, FTAG);
4256 }
4257
4258 spa_unload(spa);
4259 spa_deactivate(spa);
4260 spa_remove(spa);
4261 mutex_exit(&spa_namespace_lock);
4262
4263 return (config);
4264}
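/*
 * Added note (commentary, not in the original source): because the probe
 * runs under the reserved name TRYIMPORT_NAME and is activated read-only
 * (FREAD), it can never collide with a real pool in the namespace; the
 * returned config is advisory, and a caller that likes what it sees is
 * expected to follow up with a real spa_import().
 */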
4265
4266/*
4267 * Pool export/destroy
4268 *
4269 * The act of destroying or exporting a pool is very simple. We make sure there
4270 * is no more pending I/O and that no references to the pool remain. Then, we
4271 * update the pool state and sync all the labels to disk, removing the
4272 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4273 * we don't sync the labels or remove the configuration cache.
4274 */
4275static int
4276spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4277 boolean_t force, boolean_t hardforce)
4278{
4279 spa_t *spa;
4280
4281 if (oldconfig)
4282 *oldconfig = NULL;
4283
4284 if (!(spa_mode_global & FWRITE))
4285 return (SET_ERROR(EROFS));
4286
4287 mutex_enter(&spa_namespace_lock);
4288 if ((spa = spa_lookup(pool)) == NULL) {
4289 mutex_exit(&spa_namespace_lock);
4290 return (SET_ERROR(ENOENT));
4291 }
4292
4293 /*
4294 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4295 * reacquire the namespace lock, and see if we can export.
4296 */
4297 spa_open_ref(spa, FTAG);
4298 mutex_exit(&spa_namespace_lock);
4299 spa_async_suspend(spa);
4300 mutex_enter(&spa_namespace_lock);
4301 spa_close(spa, FTAG);
4302
4303 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
4304 goto export_spa;
4305 /*
4306 * The pool will be in core if it's openable, in which case we can
4307 * modify its state. Objsets may be open only because they're dirty,
4308 * so we have to force it to sync before checking spa_refcnt.
4309 */
4310 if (spa->spa_sync_on)
4311 txg_wait_synced(spa->spa_dsl_pool, 0);
4312
4313 /*
4314 * A pool cannot be exported or destroyed if there are active
4315 * references. If we are resetting a pool, allow references by
4316 * fault injection handlers.
4317 */
4318 if (!spa_refcount_zero(spa) ||
4319 (spa->spa_inject_ref != 0 &&
4320 new_state != POOL_STATE_UNINITIALIZED)) {
4321 spa_async_resume(spa);
4322 mutex_exit(&spa_namespace_lock);
4323 return (SET_ERROR(EBUSY));
4324 }
4325
4326 if (spa->spa_sync_on) {
4327 /*
4328 * A pool cannot be exported if it has an active shared spare.
4329 * This is to prevent other pools stealing the active spare
4330 * from an exported pool. The user may still force such a
4331 * pool to be exported.
4332 */
4333 if (!force && new_state == POOL_STATE_EXPORTED &&
4334 spa_has_active_shared_spare(spa)) {
4335 spa_async_resume(spa);
4336 mutex_exit(&spa_namespace_lock);
4337 return (SET_ERROR(EXDEV));
4338 }
4339
4340 /*
4341 * We want this to be reflected on every label,
4342 * so mark them all dirty. spa_unload() will do the
4343 * final sync that pushes these changes out.
4344 */
4345 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4346 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4347 spa->spa_state = new_state;
4348 spa->spa_final_txg = spa_last_synced_txg(spa) +
4349 TXG_DEFER_SIZE + 1;
4350 vdev_config_dirty(spa->spa_root_vdev);
4351 spa_config_exit(spa, SCL_ALL, FTAG);
4352 }
4353 }
4354
4355export_spa:
4356 spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_DESTROY);
4357
4358 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4359 spa_unload(spa);
4360 spa_deactivate(spa);
4361 }
4362
4363 if (oldconfig && spa->spa_config)
4364 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4365
4366 if (new_state != POOL_STATE_UNINITIALIZED) {
4367 if (!hardforce)
4368 spa_config_sync(spa, B_TRUE, B_TRUE);
4369 spa_remove(spa);
4370 }
4371 mutex_exit(&spa_namespace_lock);
4372
4373 return (0);
4374}
4375
4376/*
4377 * Destroy a storage pool.
4378 */
4379int
4380spa_destroy(char *pool)
4381{
4382 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4383 B_FALSE, B_FALSE));
4384}
4385
4386/*
4387 * Export a storage pool.
4388 */
4389int
4390spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4391 boolean_t hardforce)
4392{
4393 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4394 force, hardforce));
4395}
4396
4397/*
4398 * Similar to spa_export(), this unloads the spa_t without actually removing it
4399 * from the namespace in any way.
4400 */
4401int
4402spa_reset(char *pool)
4403{
4404 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4405 B_FALSE, B_FALSE));
4406}
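/*
 * Added summary (commentary, not in the original source): the wrappers
 * above differ only in the state passed to spa_export_common():
 *
 *	spa_destroy()	POOL_STATE_DESTROYED
 *	spa_export()	POOL_STATE_EXPORTED
 *	spa_reset()	POOL_STATE_UNINITIALIZED (unload, keep namespace)
 */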
4407
4408/*
4409 * ==========================================================================
4410 * Device manipulation
4411 * ==========================================================================
4412 */
4413
4414/*
4415 * Add a device to a storage pool.
4416 */
4417int
4418spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4419{
4420 uint64_t txg, id;
4421 int error;
4422 vdev_t *rvd = spa->spa_root_vdev;
4423 vdev_t *vd, *tvd;
4424 nvlist_t **spares, **l2cache;
4425 uint_t nspares, nl2cache;
4426 int c;
4427
4428 ASSERT(spa_writeable(spa));
4429
4430 txg = spa_vdev_enter(spa);
4431
4432 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4433 VDEV_ALLOC_ADD)) != 0)
4434 return (spa_vdev_exit(spa, NULL, txg, error));
4435
4436 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4437
4438 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4439 &nspares) != 0)
4440 nspares = 0;
4441
4442 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4443 &nl2cache) != 0)
4444 nl2cache = 0;
4445
4446 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4447 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4448
4449 if (vd->vdev_children != 0 &&
4450 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4451 return (spa_vdev_exit(spa, vd, txg, error));
4452
4453 /*
4454 * We must validate the spares and l2cache devices after checking the
4455 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4456 */
4457 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4458 return (spa_vdev_exit(spa, vd, txg, error));
4459
4460 /*
4461 * Transfer each new top-level vdev from vd to rvd.
4462 */
4463 for (c = 0; c < vd->vdev_children; c++) {
4464
4465 /*
4466 * Set the vdev id to the first hole, if one exists.
4467 */
4468 for (id = 0; id < rvd->vdev_children; id++) {
4469 if (rvd->vdev_child[id]->vdev_ishole) {
4470 vdev_free(rvd->vdev_child[id]);
4471 break;
4472 }
4473 }
4474 tvd = vd->vdev_child[c];
4475 vdev_remove_child(vd, tvd);
4476 tvd->vdev_id = id;
4477 vdev_add_child(rvd, tvd);
4478 vdev_config_dirty(tvd);
4479 }
4480
4481 if (nspares != 0) {
4482 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4483 ZPOOL_CONFIG_SPARES);
4484 spa_load_spares(spa);
4485 spa->spa_spares.sav_sync = B_TRUE;
4486 }
4487
4488 if (nl2cache != 0) {
4489 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4490 ZPOOL_CONFIG_L2CACHE);
4491 spa_load_l2cache(spa);
4492 spa->spa_l2cache.sav_sync = B_TRUE;
4493 }
4494
4495 /*
4496 * We have to be careful when adding new vdevs to an existing pool.
4497 * If other threads start allocating from these vdevs before we
4498 * sync the config cache, and we lose power, then upon reboot we may
4499 * fail to open the pool because there are DVAs that the config cache
4500 * can't translate. Therefore, we first add the vdevs without
4501 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4502 * and then let spa_config_update() initialize the new metaslabs.
4503 *
4504 * spa_load() checks for added-but-not-initialized vdevs, so that
4505 * if we lose power at any point in this sequence, the remaining
4506 * steps will be completed the next time we load the pool.
4507 */
4508 (void) spa_vdev_exit(spa, vd, txg, 0);
4509
4510 mutex_enter(&spa_namespace_lock);
4511 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4512 mutex_exit(&spa_namespace_lock);
4513
4514 return (0);
4515}
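/*
 * Illustrative sketch (hypothetical, not part of this file): "zpool add"
 * ultimately reduces to a call of this shape; the helper that builds the
 * one-disk nvroot is an assumption for the example.
 */
#if 0
	nvlist_t *nvroot = make_single_disk_nvroot("/dev/sdb"); /* assumed */
	int error = spa_vdev_add(spa, nvroot);

	nvlist_free(nvroot);
#endif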
4516
4517/*
4518 * Attach a device to a mirror. The arguments are the path to any device
4519 * in the mirror, and the nvroot for the new device. If the path specifies
4520 * a device that is not mirrored, we automatically insert the mirror vdev.
4521 *
4522 * If 'replacing' is specified, the new device is intended to replace the
4523 * existing device; in this case the two devices are made into their own
4524 * mirror using the 'replacing' vdev, which is functionally identical to
4525 * the mirror vdev (it actually reuses all the same ops) but has a few
4526 * extra rules: you can't attach to it after it's been created, and upon
4527 * completion of resilvering, the first disk (the one being replaced)
4528 * is automatically detached.
4529 */
4530int
4531spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4532{
4533 uint64_t txg, dtl_max_txg;
4534 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4535 vdev_ops_t *pvops;
4536 char *oldvdpath, *newvdpath;
4537 int newvd_isspare;
4538 int error;
4539 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
4540
4541 ASSERT(spa_writeable(spa));
4542
4543 txg = spa_vdev_enter(spa);
4544
4545 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4546
4547 if (oldvd == NULL)
4548 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4549
4550 if (!oldvd->vdev_ops->vdev_op_leaf)
4551 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4552
4553 pvd = oldvd->vdev_parent;
4554
4555 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4556 VDEV_ALLOC_ATTACH)) != 0)
4557 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4558
4559 if (newrootvd->vdev_children != 1)
4560 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4561
4562 newvd = newrootvd->vdev_child[0];
4563
4564 if (!newvd->vdev_ops->vdev_op_leaf)
4565 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4566
4567 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4568 return (spa_vdev_exit(spa, newrootvd, txg, error));
4569
4570 /*
4571 * Spares can't replace logs
4572 */
4573 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4574 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4575
4576 if (!replacing) {
4577 /*
4578 * For attach, the only allowable parent is a mirror or the root
4579 * vdev.
4580 */
4581 if (pvd->vdev_ops != &vdev_mirror_ops &&
4582 pvd->vdev_ops != &vdev_root_ops)
4583 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4584
4585 pvops = &vdev_mirror_ops;
4586 } else {
4587 /*
4588 * Active hot spares can only be replaced by inactive hot
4589 * spares.
4590 */
4591 if (pvd->vdev_ops == &vdev_spare_ops &&
4592 oldvd->vdev_isspare &&
4593 !spa_has_spare(spa, newvd->vdev_guid))
4594 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4595
4596 /*
4597 * If the source is a hot spare, and the parent isn't already a
4598 * spare, then we want to create a new hot spare. Otherwise, we
4599 * want to create a replacing vdev. The user is not allowed to
4600 * attach to a spared vdev child unless the 'isspare' state is
4601 * the same (spare replaces spare, non-spare replaces
4602 * non-spare).
4603 */
4604 if (pvd->vdev_ops == &vdev_replacing_ops &&
4605 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4606 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4607 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4608 newvd->vdev_isspare != oldvd->vdev_isspare) {
4609 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4610 }
4611
4612 if (newvd->vdev_isspare)
4613 pvops = &vdev_spare_ops;
4614 else
4615 pvops = &vdev_replacing_ops;
4616 }
4617
4618 /*
4619 * Make sure the new device is big enough.
4620 */
4621 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4622 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4623
4624 /*
4625 * The new device cannot have a higher alignment requirement
4626 * than the top-level vdev.
4627 */
4628 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4629 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4630
4631 /*
4632 * If this is an in-place replacement, update oldvd's path and devid
4633 * to make it distinguishable from newvd, and unopenable from now on.
4634 */
4635 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4636 spa_strfree(oldvd->vdev_path);
4637 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4638 KM_SLEEP);
4639 (void) sprintf(oldvd->vdev_path, "%s/%s",
4640 newvd->vdev_path, "old");
4641 if (oldvd->vdev_devid != NULL) {
4642 spa_strfree(oldvd->vdev_devid);
4643 oldvd->vdev_devid = NULL;
4644 }
4645 }
4646
4647 /* mark the device being resilvered */
4648 newvd->vdev_resilver_txg = txg;
4649
4650 /*
4651 * If the parent is not a mirror, or if we're replacing, insert the new
4652 * mirror/replacing/spare vdev above oldvd.
4653 */
4654 if (pvd->vdev_ops != pvops)
4655 pvd = vdev_add_parent(oldvd, pvops);
4656
4657 ASSERT(pvd->vdev_top->vdev_parent == rvd);
4658 ASSERT(pvd->vdev_ops == pvops);
4659 ASSERT(oldvd->vdev_parent == pvd);
4660
4661 /*
4662 * Extract the new device from its root and add it to pvd.
4663 */
4664 vdev_remove_child(newrootvd, newvd);
4665 newvd->vdev_id = pvd->vdev_children;
4666 newvd->vdev_crtxg = oldvd->vdev_crtxg;
4667 vdev_add_child(pvd, newvd);
4668
4669 tvd = newvd->vdev_top;
4670 ASSERT(pvd->vdev_top == tvd);
4671 ASSERT(tvd->vdev_parent == rvd);
4672
4673 vdev_config_dirty(tvd);
4674
4675 /*
4676 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4677 * for any dmu_sync-ed blocks. It will propagate upward when
4678 * spa_vdev_exit() calls vdev_dtl_reassess().
4679 */
4680 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
4681
4682 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4683 dtl_max_txg - TXG_INITIAL);
4684
4685 if (newvd->vdev_isspare) {
4686 spa_spare_activate(newvd);
4687 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_DEVICE_SPARE);
4688 }
4689
4690 oldvdpath = spa_strdup(oldvd->vdev_path);
4691 newvdpath = spa_strdup(newvd->vdev_path);
4692 newvd_isspare = newvd->vdev_isspare;
4693
4694 /*
4695 * Mark newvd's DTL dirty in this txg.
4696 */
4697 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4698
4699 /*
4700 * Schedule the resilver to restart in the future. We do this to
4701 * ensure that dmu_sync-ed blocks have been stitched into the
4702 * respective datasets.
4703 */
4704 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4705
4706 /*
4707 * Commit the config
4708 */
4709 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4710
4711 spa_history_log_internal(spa, "vdev attach", NULL,
4712 "%s vdev=%s %s vdev=%s",
4713 replacing && newvd_isspare ? "spare in" :
4714 replacing ? "replace" : "attach", newvdpath,
4715 replacing ? "for" : "to", oldvdpath);
4716
4717 spa_strfree(oldvdpath);
4718 spa_strfree(newvdpath);
4719
4720 if (spa->spa_bootfs)
4721 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_BOOTFS_VDEV_ATTACH);
4722
4723 return (0);
4724}
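/*
 * Illustrative sketch (hypothetical, not part of this file): the same
 * entry point serves both "zpool attach" and "zpool replace"; only the
 * 'replacing' argument differs:
 *
 *	error = spa_vdev_attach(spa, guid, nvroot, B_FALSE);	attach
 *	error = spa_vdev_attach(spa, guid, nvroot, B_TRUE);	replace
 */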
4725
4726/*
4727 * Detach a device from a mirror or replacing vdev.
4728 *
4729 * If 'replace_done' is specified, only detach if the parent
4730 * is a replacing vdev.
4731 */
4732int
4733spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
4734{
4735 uint64_t txg;
4736 int error;
4737 vdev_t *vd, *pvd, *cvd, *tvd;
4738 boolean_t unspare = B_FALSE;
4739 uint64_t unspare_guid = 0;
4740 char *vdpath;
4741 int c, t;
4742 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
4743 ASSERT(spa_writeable(spa));
4744
4745 txg = spa_vdev_enter(spa);
4746
4747 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
4748
4749 if (vd == NULL)
4750 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4751
4752 if (!vd->vdev_ops->vdev_op_leaf)
4753 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4754
4755 pvd = vd->vdev_parent;
4756
4757 /*
4758 * If the parent/child relationship is not as expected, don't do it.
4759 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4760 * vdev that's replacing B with C. The user's intent in replacing
4761 * is to go from M(A,B) to M(A,C). If the user decides to cancel
4762 * the replace by detaching C, the expected behavior is to end up
4763 * M(A,B). But suppose that right after deciding to detach C,
4764 * the replacement of B completes. We would have M(A,C), and then
4765 * ask to detach C, which would leave us with just A -- not what
4766 * the user wanted. To prevent this, we make sure that the
4767 * parent/child relationship hasn't changed -- in this example,
4768 * that C's parent is still the replacing vdev R.
4769 */
4770 if (pvd->vdev_guid != pguid && pguid != 0)
4771 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4772
4773 /*
4774 * Only 'replacing' or 'spare' vdevs can be replaced.
4775 */
4776 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4777 pvd->vdev_ops != &vdev_spare_ops)
4778 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4779
4780 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4781 spa_version(spa) >= SPA_VERSION_SPARES);
4782
4783 /*
4784 * Only mirror, replacing, and spare vdevs support detach.
4785 */
4786 if (pvd->vdev_ops != &vdev_replacing_ops &&
4787 pvd->vdev_ops != &vdev_mirror_ops &&
4788 pvd->vdev_ops != &vdev_spare_ops)
4789 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4790
4791 /*
4792 * If this device has the only valid copy of some data,
4793 * we cannot safely detach it.
4794 */
4795 if (vdev_dtl_required(vd))
4796 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4797
4798 ASSERT(pvd->vdev_children >= 2);
4799
4800 /*
4801 * If we are detaching the second disk from a replacing vdev, then
4802 * check to see if we changed the original vdev's path to have "/old"
4803 * at the end in spa_vdev_attach(). If so, undo that change now.
4804 */
4805 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4806 vd->vdev_path != NULL) {
4807 size_t len = strlen(vd->vdev_path);
4808
4809 for (c = 0; c < pvd->vdev_children; c++) {
4810 cvd = pvd->vdev_child[c];
4811
4812 if (cvd == vd || cvd->vdev_path == NULL)
4813 continue;
4814
4815 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4816 strcmp(cvd->vdev_path + len, "/old") == 0) {
4817 spa_strfree(cvd->vdev_path);
4818 cvd->vdev_path = spa_strdup(vd->vdev_path);
4819 break;
4820 }
4821 }
4822 }
4823
4824 /*
4825 * If we are detaching the original disk from a spare, then it implies
4826 * that the spare should become a real disk, and be removed from the
4827 * active spare list for the pool.
4828 */
4829 if (pvd->vdev_ops == &vdev_spare_ops &&
4830 vd->vdev_id == 0 &&
4831 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
4832 unspare = B_TRUE;
4833
4834 /*
4835 * Erase the disk labels so the disk can be used for other things.
4836 * This must be done after all other error cases are handled,
4837 * but before we disembowel vd (so we can still do I/O to it).
4838 * But if we can't do it, don't treat the error as fatal --
4839 * it may be that the unwritability of the disk is the reason
4840 * it's being detached!
4841 */
4842 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4843
4844 /*
4845 * Remove vd from its parent and compact the parent's children.
4846 */
4847 vdev_remove_child(pvd, vd);
4848 vdev_compact_children(pvd);
4849
4850 /*
4851 * Remember one of the remaining children so we can get tvd below.
4852 */
4853 cvd = pvd->vdev_child[pvd->vdev_children - 1];
4854
4855 /*
4856 * If we need to remove the remaining child from the list of hot spares,
4857 * do it now, marking the vdev as no longer a spare in the process.
4858 * We must do this before vdev_remove_parent(), because that can
4859 * change the GUID if it creates a new toplevel GUID. For a similar
4860 * reason, we must remove the spare now, in the same txg as the detach;
4861 * otherwise someone could attach a new sibling, change the GUID, and
4862 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
4863 */
4864 if (unspare) {
4865 ASSERT(cvd->vdev_isspare);
4866 spa_spare_remove(cvd);
4867 unspare_guid = cvd->vdev_guid;
4868 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
4869 cvd->vdev_unspare = B_TRUE;
4870 }
4871
4872 /*
4873 * If the parent mirror/replacing vdev only has one child,
4874 * the parent is no longer needed. Remove it from the tree.
4875 */
4876 if (pvd->vdev_children == 1) {
4877 if (pvd->vdev_ops == &vdev_spare_ops)
4878 cvd->vdev_unspare = B_FALSE;
4879 vdev_remove_parent(cvd);
4880 }
4881
4882
4883 /*
4884 * We don't set tvd until now because the parent we just removed
4885 * may have been the previous top-level vdev.
4886 */
4887 tvd = cvd->vdev_top;
4888 ASSERT(tvd->vdev_parent == rvd);
4889
4890 /*
4891 * Reevaluate the parent vdev state.
4892 */
4893 vdev_propagate_state(cvd);
4894
4895 /*
4896 * If the 'autoexpand' property is set on the pool then automatically
4897 * try to expand the size of the pool. For example if the device we
4898 * just detached was smaller than the others, it may be possible to
4899 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4900 * first so that we can obtain the updated sizes of the leaf vdevs.
4901 */
4902 if (spa->spa_autoexpand) {
4903 vdev_reopen(tvd);
4904 vdev_expand(tvd, txg);
4905 }
4906
4907 vdev_config_dirty(tvd);
4908
4909 /*
4910 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
4911 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4912 * But first make sure we're not on any *other* txg's DTL list, to
4913 * prevent vd from being accessed after it's freed.
4914 */
4915 vdpath = spa_strdup(vd->vdev_path);
4916 for (t = 0; t < TXG_SIZE; t++)
4917 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4918 vd->vdev_detached = B_TRUE;
4919 vdev_dirty(tvd, VDD_DTL, vd, txg);
4920
4921 spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_REMOVE);
4922
4923 /* hang on to the spa before we release the lock */
4924 spa_open_ref(spa, FTAG);
4925
4926 error = spa_vdev_exit(spa, vd, txg, 0);
4927
4928 spa_history_log_internal(spa, "detach", NULL,
4929 "vdev=%s", vdpath);
4930 spa_strfree(vdpath);
4931
4932 /*
4933 * If this was the removal of the original device in a hot spare vdev,
4934 * then we want to go through and remove the device from the hot spare
4935 * list of every other pool.
4936 */
4937 if (unspare) {
4938 spa_t *altspa = NULL;
4939
4940 mutex_enter(&spa_namespace_lock);
4941 while ((altspa = spa_next(altspa)) != NULL) {
4942 if (altspa->spa_state != POOL_STATE_ACTIVE ||
4943 altspa == spa)
4944 continue;
4945
4946 spa_open_ref(altspa, FTAG);
4947 mutex_exit(&spa_namespace_lock);
4948 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
4949 mutex_enter(&spa_namespace_lock);
4950 spa_close(altspa, FTAG);
4951 }
4952 mutex_exit(&spa_namespace_lock);
4953
4954 /* search the rest of the vdevs for spares to remove */
4955 spa_vdev_resilver_done(spa);
4956 }
4957
4958 /* all done with the spa; OK to release */
4959 mutex_enter(&spa_namespace_lock);
4960 spa_close(spa, FTAG);
4961 mutex_exit(&spa_namespace_lock);
4962
4963 return (error);
4964}
4965
4966/*
4967 * Split a set of devices from their mirrors, and create a new pool from them.
4968 */
4969int
4970spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4971 nvlist_t *props, boolean_t exp)
4972{
4973 int error = 0;
4974 uint64_t txg, *glist;
4975 spa_t *newspa;
4976 uint_t c, children, lastlog;
4977 nvlist_t **child, *nvl, *tmp;
4978 dmu_tx_t *tx;
4979 char *altroot = NULL;
4980 vdev_t *rvd, **vml = NULL; /* vdev modify list */
4981 boolean_t activate_slog;
4982
4983 ASSERT(spa_writeable(spa));
4984
4985 txg = spa_vdev_enter(spa);
4986
4987 /* clear the log and flush everything up to now */
4988 activate_slog = spa_passivate_log(spa);
4989 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4990 error = spa_offline_log(spa);
4991 txg = spa_vdev_config_enter(spa);
4992
4993 if (activate_slog)
4994 spa_activate_log(spa);
4995
4996 if (error != 0)
4997 return (spa_vdev_exit(spa, NULL, txg, error));
4998
4999 /* check new spa name before going any further */
5000 if (spa_lookup(newname) != NULL)
5001 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
5002
5003 /*
5004 * scan through all the children to ensure they're all mirrors
5005 */
5006 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
5007 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
5008 &children) != 0)
5009 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5010
5011 /* first, check to ensure we've got the right child count */
5012 rvd = spa->spa_root_vdev;
5013 lastlog = 0;
5014 for (c = 0; c < rvd->vdev_children; c++) {
5015 vdev_t *vd = rvd->vdev_child[c];
5016
5017 /* don't count the holes & logs as children */
5018 if (vd->vdev_islog || vd->vdev_ishole) {
5019 if (lastlog == 0)
5020 lastlog = c;
5021 continue;
5022 }
5023
5024 lastlog = 0;
5025 }
5026 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
5027 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5028
5029 /* next, ensure no spare or cache devices are part of the split */
5030 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
5031 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
5032 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5033
5034 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
5035 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
5036
5037 /* then, loop over each vdev and validate it */
5038 for (c = 0; c < children; c++) {
5039 uint64_t is_hole = 0;
5040
5041 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
5042 &is_hole);
5043
5044 if (is_hole != 0) {
5045 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
5046 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
5047 continue;
5048 } else {
5049 error = SET_ERROR(EINVAL);
5050 break;
5051 }
5052 }
5053
5054 /* which disk is going to be split? */
5055 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
5056 &glist[c]) != 0) {
5057 error = SET_ERROR(EINVAL);
5058 break;
5059 }
5060
5061 /* look it up in the spa */
5062 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
5063 if (vml[c] == NULL) {
5064 error = SET_ERROR(ENODEV);
5065 break;
5066 }
5067
5068 /* make sure there's nothing stopping the split */
5069 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
5070 vml[c]->vdev_islog ||
5071 vml[c]->vdev_ishole ||
5072 vml[c]->vdev_isspare ||
5073 vml[c]->vdev_isl2cache ||
5074 !vdev_writeable(vml[c]) ||
5075 vml[c]->vdev_children != 0 ||
5076 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
5077 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
5078 error = SET_ERROR(EINVAL);
5079 break;
5080 }
5081
5082 if (vdev_dtl_required(vml[c])) {
5083 error = SET_ERROR(EBUSY);
5084 break;
5085 }
5086
5087 /* we need certain info from the top level */
5088 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
5089 vml[c]->vdev_top->vdev_ms_array) == 0);
5090 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
5091 vml[c]->vdev_top->vdev_ms_shift) == 0);
5092 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
5093 vml[c]->vdev_top->vdev_asize) == 0);
5094 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
5095 vml[c]->vdev_top->vdev_ashift) == 0);
5096 }
5097
5098 if (error != 0) {
5099 kmem_free(vml, children * sizeof (vdev_t *));
5100 kmem_free(glist, children * sizeof (uint64_t));
5101 return (spa_vdev_exit(spa, NULL, txg, error));
5102 }
5103
5104 /* stop writers from using the disks */
5105 for (c = 0; c < children; c++) {
5106 if (vml[c] != NULL)
5107 vml[c]->vdev_offline = B_TRUE;
5108 }
5109 vdev_reopen(spa->spa_root_vdev);
5110
5111 /*
5112 * Temporarily record the splitting vdevs in the spa config. This
5113 * will disappear once the config is regenerated.
34dc7c2f 5114 */
5115 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5116 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5117 glist, children) == 0);
5118 kmem_free(glist, children * sizeof (uint64_t));
5119
5120 mutex_enter(&spa->spa_props_lock);
5121 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5122 nvl) == 0);
5123 mutex_exit(&spa->spa_props_lock);
5124 spa->spa_config_splitting = nvl;
5125 vdev_config_dirty(spa->spa_root_vdev);
5126
5127 /* configure and create the new pool */
5128 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5129 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5130 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5131 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5132 spa_version(spa)) == 0);
5133 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5134 spa->spa_config_txg) == 0);
5135 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5136 spa_generate_guid(NULL)) == 0);
5137 (void) nvlist_lookup_string(props,
5138 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5139
5140 /* add the new pool to the namespace */
5141 newspa = spa_add(newname, config, altroot);
5142 newspa->spa_config_txg = spa->spa_config_txg;
5143 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5144
5145 /* release the spa config lock, retaining the namespace lock */
5146 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5147
5148 if (zio_injection_enabled)
5149 zio_handle_panic_injection(spa, FTAG, 1);
5150
5151 spa_activate(newspa, spa_mode_global);
5152 spa_async_suspend(newspa);
5153
5154 /* create the new pool from the disks of the original pool */
5155 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5156 if (error)
5157 goto out;
5158
5159 /* if that worked, generate a real config for the new pool */
5160 if (newspa->spa_root_vdev != NULL) {
5161 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5162 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5163 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5164 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5165 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5166 B_TRUE));
5167 }
5168
5169 /* set the props */
5170 if (props != NULL) {
5171 spa_configfile_set(newspa, props, B_FALSE);
5172 error = spa_prop_set(newspa, props);
5173 if (error)
5174 goto out;
5175 }
5176
5177 /* flush everything */
5178 txg = spa_vdev_config_enter(newspa);
5179 vdev_config_dirty(newspa->spa_root_vdev);
5180 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5181
5182 if (zio_injection_enabled)
5183 zio_handle_panic_injection(spa, FTAG, 2);
5184
5185 spa_async_resume(newspa);
5186
5187 /* finally, update the original pool's config */
5188 txg = spa_vdev_config_enter(spa);
5189 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5190 error = dmu_tx_assign(tx, TXG_WAIT);
5191 if (error != 0)
5192 dmu_tx_abort(tx);
5193 for (c = 0; c < children; c++) {
5194 if (vml[c] != NULL) {
5195 vdev_split(vml[c]);
5196 if (error == 0)
5197 spa_history_log_internal(spa, "detach", tx,
5198 "vdev=%s", vml[c]->vdev_path);
5199 vdev_free(vml[c]);
5200 }
5201 }
5202 vdev_config_dirty(spa->spa_root_vdev);
5203 spa->spa_config_splitting = NULL;
5204 nvlist_free(nvl);
5205 if (error == 0)
5206 dmu_tx_commit(tx);
5207 (void) spa_vdev_exit(spa, NULL, txg, 0);
5208
5209 if (zio_injection_enabled)
5210 zio_handle_panic_injection(spa, FTAG, 3);
5211
5212 /* split is complete; log a history record */
5213 spa_history_log_internal(newspa, "split", NULL,
5214 "from pool %s", spa_name(spa));
428870ff
BB
5215
5216 kmem_free(vml, children * sizeof (vdev_t *));
5217
5218 /* if we're not going to mount the filesystems in userland, export */
5219 if (exp)
5220 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5221 B_FALSE, B_FALSE);
5222
5223 return (error);
5224
5225out:
5226 spa_unload(newspa);
5227 spa_deactivate(newspa);
5228 spa_remove(newspa);
5229
5230 txg = spa_vdev_config_enter(spa);
5231
5232 /* re-online all offlined disks */
5233 for (c = 0; c < children; c++) {
5234 if (vml[c] != NULL)
5235 vml[c]->vdev_offline = B_FALSE;
5236 }
5237 vdev_reopen(spa->spa_root_vdev);
5238
5239 nvlist_free(spa->spa_config_splitting);
5240 spa->spa_config_splitting = NULL;
5241 (void) spa_vdev_exit(spa, NULL, txg, error);
34dc7c2f 5242
428870ff 5243 kmem_free(vml, children * sizeof (vdev_t *));
34dc7c2f
BB
5244 return (error);
5245}
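/*
 * [Editorial note] spa_vdev_split_mirror() is the kernel half of the
 * userland "zpool split" command; e.g. "zpool split tank tank2" (pool
 * names hypothetical) detaches one side of each mirror in "tank" and
 * assembles the detached disks into a new pool named "tank2".
 */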
5246
b128c09f
BB
5247static nvlist_t *
5248spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
34dc7c2f 5249{
d6320ddb
BB
5250 int i;
5251
5252 for (i = 0; i < count; i++) {
b128c09f 5253 uint64_t guid;
34dc7c2f 5254
b128c09f
BB
5255 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5256 &guid) == 0);
34dc7c2f 5257
b128c09f
BB
5258 if (guid == target_guid)
5259 return (nvpp[i]);
34dc7c2f
BB
5260 }
5261
b128c09f 5262 return (NULL);
34dc7c2f
BB
5263}
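/*
 * [Editorial sketch, not part of spa.c] A self-contained userland analogue
 * of the lookup above, using libnvpair (compile with -lnvpair). It builds
 * two nvlists carrying a "guid" field and scans for a target, mirroring
 * how spa_nvlist_lookup_by_guid() scans ZPOOL_CONFIG_GUID. All names and
 * values here are hypothetical.
 */
#include <stdio.h>
#include <libnvpair.h>

static nvlist_t *
lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target)
{
    int i;

    for (i = 0; i < count; i++) {
        uint64_t guid;

        /* skip entries without a guid rather than asserting */
        if (nvlist_lookup_uint64(nvpp[i], "guid", &guid) == 0 &&
            guid == target)
            return (nvpp[i]);
    }
    return (NULL);
}

int
main(void)
{
    nvlist_t *nvl[2];

    nvlist_alloc(&nvl[0], NV_UNIQUE_NAME, 0);
    nvlist_alloc(&nvl[1], NV_UNIQUE_NAME, 0);
    nvlist_add_uint64(nvl[0], "guid", 111ULL);
    nvlist_add_uint64(nvl[1], "guid", 222ULL);

    printf("found: %s\n",
        lookup_by_guid(nvl, 2, 222ULL) != NULL ? "yes" : "no");

    nvlist_free(nvl[0]);
    nvlist_free(nvl[1]);
    return (0);
}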
5264
b128c09f
BB
5265static void
5266spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5267 nvlist_t *dev_to_remove)
34dc7c2f 5268{
b128c09f 5269 nvlist_t **newdev = NULL;
d6320ddb 5270 int i, j;
34dc7c2f 5271
b128c09f 5272 if (count > 1)
79c76d5b 5273 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
34dc7c2f 5274
d6320ddb 5275 for (i = 0, j = 0; i < count; i++) {
b128c09f
BB
5276 if (dev[i] == dev_to_remove)
5277 continue;
79c76d5b 5278 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
34dc7c2f
BB
5279 }
5280
b128c09f
BB
5281 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5282 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
34dc7c2f 5283
d6320ddb 5284 for (i = 0; i < count - 1; i++)
b128c09f 5285 nvlist_free(newdev[i]);
34dc7c2f 5286
b128c09f
BB
5287 if (count > 1)
5288 kmem_free(newdev, (count - 1) * sizeof (void *));
34dc7c2f
BB
5289}
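/*
 * [Editorial sketch, not part of spa.c] The allocate-(count - 1)/copy-skip
 * pattern used by spa_vdev_remove_aux() above, reduced to plain C so the
 * shape is easy to see. All names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

static int *
remove_one(const int *dev, int count, int idx_to_remove, int *newcount)
{
    int i, j;
    int *newdev = NULL;

    if (count > 1)
        newdev = malloc((count - 1) * sizeof (int));
    for (i = 0, j = 0; i < count; i++) {
        if (i == idx_to_remove)
            continue;        /* skip the removed entry */
        newdev[j++] = dev[i];
    }
    *newcount = count - 1;
    return (newdev);
}

int
main(void)
{
    int dev[] = { 10, 20, 30 };
    int n, i;
    int *out = remove_one(dev, 3, 1, &n);

    for (i = 0; i < n; i++)
        printf("%d\n", out[i]);    /* prints 10 then 30 */
    free(out);
    return (0);
}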
5290
428870ff
BB
5291/*
5292 * Evacuate the device.
5293 */
5294static int
5295spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5296{
5297 uint64_t txg;
5298 int error = 0;
5299
5300 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5301 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5302 ASSERT(vd == vd->vdev_top);
5303
5304 /*
5305 * Evacuate the device. We don't hold the config lock as writer
5306 * since we need to do I/O but we do keep the
5307 * spa_namespace_lock held. Once this completes the device
5308 * should no longer have any blocks allocated on it.
5309 */
5310 if (vd->vdev_islog) {
5311 if (vd->vdev_stat.vs_alloc != 0)
5312 error = spa_offline_log(spa);
5313 } else {
2e528b49 5314 error = SET_ERROR(ENOTSUP);
428870ff
BB
5315 }
5316
5317 if (error)
5318 return (error);
5319
5320 /*
5321 * The evacuation succeeded. Remove any remaining MOS metadata
5322 * associated with this vdev, and wait for these changes to sync.
5323 */
c99c9001 5324 ASSERT0(vd->vdev_stat.vs_alloc);
428870ff
BB
5325 txg = spa_vdev_config_enter(spa);
5326 vd->vdev_removing = B_TRUE;
93cf2076 5327 vdev_dirty_leaves(vd, VDD_DTL, txg);
428870ff
BB
5328 vdev_config_dirty(vd);
5329 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5330
5331 return (0);
5332}
5333
5334/*
5335 * Complete the removal by cleaning up the namespace.
5336 */
5337static void
5338spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5339{
5340 vdev_t *rvd = spa->spa_root_vdev;
5341 uint64_t id = vd->vdev_id;
5342 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5343
5344 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5345 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5346 ASSERT(vd == vd->vdev_top);
5347
5348 /*
5349 * Only remove devices that are empty.
5350 */
5351 if (vd->vdev_stat.vs_alloc != 0)
5352 return;
5353
5354 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5355
5356 if (list_link_active(&vd->vdev_state_dirty_node))
5357 vdev_state_clean(vd);
5358 if (list_link_active(&vd->vdev_config_dirty_node))
5359 vdev_config_clean(vd);
5360
5361 vdev_free(vd);
5362
5363 if (last_vdev) {
5364 vdev_compact_children(rvd);
5365 } else {
5366 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5367 vdev_add_child(rvd, vd);
5368 }
5369 vdev_config_dirty(rvd);
5370
5371 /*
5372 * Reassess the health of our root vdev.
5373 */
5374 vdev_reopen(rvd);
5375}
5376
5377/*
5378 * Remove a device from the pool -
5379 *
5380 * Removing a device from the vdev namespace requires several steps
5381 * and can take a significant amount of time. As a result we use
5382 * the spa_vdev_config_[enter/exit] functions which allow us to
5383 * grab and release the spa_config_lock while still holding the namespace
5384 * lock. During each step the configuration is synced out.
d3cc8b15
WA
5385 *
5386 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5387 * devices.
34dc7c2f
BB
5388 */
5389int
5390spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5391{
5392 vdev_t *vd;
428870ff 5393 metaslab_group_t *mg;
b128c09f 5394 nvlist_t **spares, **l2cache, *nv;
fb5f0bc8 5395 uint64_t txg = 0;
428870ff 5396 uint_t nspares, nl2cache;
34dc7c2f 5397 int error = 0;
fb5f0bc8 5398 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
34dc7c2f 5399
572e2857
BB
5400 ASSERT(spa_writeable(spa));
5401
fb5f0bc8
BB
5402 if (!locked)
5403 txg = spa_vdev_enter(spa);
34dc7c2f 5404
b128c09f 5405 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
5406
5407 if (spa->spa_spares.sav_vdevs != NULL &&
34dc7c2f 5408 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
b128c09f
BB
5409 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5410 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5411 /*
5412 * Only remove the hot spare if it's not currently in use
5413 * in this pool.
5414 */
5415 if (vd == NULL || unspare) {
5416 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5417 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5418 spa_load_spares(spa);
5419 spa->spa_spares.sav_sync = B_TRUE;
5420 } else {
2e528b49 5421 error = SET_ERROR(EBUSY);
b128c09f
BB
5422 }
5423 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
34dc7c2f 5424 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
b128c09f
BB
5425 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5426 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5427 /*
5428 * Cache devices can always be removed.
5429 */
5430 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5431 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
34dc7c2f
BB
5432 spa_load_l2cache(spa);
5433 spa->spa_l2cache.sav_sync = B_TRUE;
428870ff
BB
5434 } else if (vd != NULL && vd->vdev_islog) {
5435 ASSERT(!locked);
5436 ASSERT(vd == vd->vdev_top);
5437
428870ff
BB
5438 mg = vd->vdev_mg;
5439
5440 /*
5441 * Stop allocating from this vdev.
5442 */
5443 metaslab_group_passivate(mg);
5444
5445 /*
5446 * Wait for the youngest allocations and frees to sync,
5447 * and then wait for the deferral of those frees to finish.
5448 */
5449 spa_vdev_config_exit(spa, NULL,
5450 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
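        /*
         * [Editorial note] With the usual values
         * TXG_CONCURRENT_STATES == 3 and TXG_DEFER_SIZE == 2 (an
         * assumption; see txg.h), this waits until txg + 5 has
         * synced: three txgs cover every open, quiescing and
         * syncing state, and two more cover the window over which
         * frees are deferred.
         */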
5451
5452 /*
5453 * Attempt to evacuate the vdev.
5454 */
5455 error = spa_vdev_remove_evacuate(spa, vd);
5456
5457 txg = spa_vdev_config_enter(spa);
5458
5459 /*
5460 * If we couldn't evacuate the vdev, unwind.
5461 */
5462 if (error) {
5463 metaslab_group_activate(mg);
5464 return (spa_vdev_exit(spa, NULL, txg, error));
5465 }
5466
5467 /*
5468 * Clean up the vdev namespace.
5469 */
5470 spa_vdev_remove_from_namespace(spa, vd);
5471
b128c09f
BB
5472 } else if (vd != NULL) {
5473 /*
5474 * Normal vdevs cannot be removed (yet).
5475 */
2e528b49 5476 error = SET_ERROR(ENOTSUP);
b128c09f
BB
5477 } else {
5478 /*
5479 * There is no vdev of any kind with the specified guid.
5480 */
2e528b49 5481 error = SET_ERROR(ENOENT);
34dc7c2f
BB
5482 }
5483
fb5f0bc8
BB
5484 if (!locked)
5485 return (spa_vdev_exit(spa, NULL, txg, error));
5486
5487 return (error);
34dc7c2f
BB
5488}
5489
5490/*
5491 * Find any device that's done replacing, or a vdev marked 'unspare' that's
d3cc8b15 5492 * currently spared, so we can detach it.
34dc7c2f
BB
5493 */
5494static vdev_t *
5495spa_vdev_resilver_done_hunt(vdev_t *vd)
5496{
5497 vdev_t *newvd, *oldvd;
d6320ddb 5498 int c;
34dc7c2f 5499
d6320ddb 5500 for (c = 0; c < vd->vdev_children; c++) {
34dc7c2f
BB
5501 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5502 if (oldvd != NULL)
5503 return (oldvd);
5504 }
5505
5506 /*
572e2857
BB
5507 * Check for a completed replacement. We always consider the first
5508 * vdev in the list to be the oldest vdev, and the last one to be
5509 * the newest (see spa_vdev_attach() for how that works). In
5510 * the case where the newest vdev is faulted, we will not automatically
5511 * remove it after a resilver completes. This is OK as it will require
5512 * user intervention to determine which disk the admin wishes to keep.
34dc7c2f 5513 */
572e2857
BB
5514 if (vd->vdev_ops == &vdev_replacing_ops) {
5515 ASSERT(vd->vdev_children > 1);
5516
5517 newvd = vd->vdev_child[vd->vdev_children - 1];
34dc7c2f 5518 oldvd = vd->vdev_child[0];
34dc7c2f 5519
fb5f0bc8 5520 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5521 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
fb5f0bc8 5522 !vdev_dtl_required(oldvd))
34dc7c2f 5523 return (oldvd);
34dc7c2f
BB
5524 }
5525
5526 /*
5527 * Check for a completed resilver with the 'unspare' flag set.
5528 */
572e2857
BB
5529 if (vd->vdev_ops == &vdev_spare_ops) {
5530 vdev_t *first = vd->vdev_child[0];
5531 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5532
5533 if (last->vdev_unspare) {
5534 oldvd = first;
5535 newvd = last;
5536 } else if (first->vdev_unspare) {
5537 oldvd = last;
5538 newvd = first;
5539 } else {
5540 oldvd = NULL;
5541 }
34dc7c2f 5542
572e2857 5543 if (oldvd != NULL &&
fb5f0bc8 5544 vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5545 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
572e2857 5546 !vdev_dtl_required(oldvd))
34dc7c2f 5547 return (oldvd);
572e2857
BB
5548
5549 /*
5550 * If there are more than two spares attached to a disk,
5551 * and those spares are not required, then we want to
5552 * attempt to free them up now so that they can be used
5553 * by other pools. Once we're back down to a single
5554 * disk+spare, we stop removing them.
5555 */
5556 if (vd->vdev_children > 2) {
5557 newvd = vd->vdev_child[1];
5558
5559 if (newvd->vdev_isspare && last->vdev_isspare &&
5560 vdev_dtl_empty(last, DTL_MISSING) &&
5561 vdev_dtl_empty(last, DTL_OUTAGE) &&
5562 !vdev_dtl_required(newvd))
5563 return (newvd);
34dc7c2f 5564 }
34dc7c2f
BB
5565 }
5566
5567 return (NULL);
5568}
5569
5570static void
5571spa_vdev_resilver_done(spa_t *spa)
5572{
fb5f0bc8
BB
5573 vdev_t *vd, *pvd, *ppvd;
5574 uint64_t guid, sguid, pguid, ppguid;
34dc7c2f 5575
fb5f0bc8 5576 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
5577
5578 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
fb5f0bc8
BB
5579 pvd = vd->vdev_parent;
5580 ppvd = pvd->vdev_parent;
34dc7c2f 5581 guid = vd->vdev_guid;
fb5f0bc8
BB
5582 pguid = pvd->vdev_guid;
5583 ppguid = ppvd->vdev_guid;
5584 sguid = 0;
34dc7c2f
BB
5585 /*
5586 * If we have just finished replacing a hot spared device, then
5587 * we need to detach the parent's first child (the original hot
5588 * spare) as well.
5589 */
572e2857
BB
5590 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5591 ppvd->vdev_children == 2) {
34dc7c2f 5592 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
fb5f0bc8 5593 sguid = ppvd->vdev_child[1]->vdev_guid;
34dc7c2f 5594 }
5d1f7fb6
GW
5595 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5596
fb5f0bc8
BB
5597 spa_config_exit(spa, SCL_ALL, FTAG);
5598 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
34dc7c2f 5599 return;
fb5f0bc8 5600 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
34dc7c2f 5601 return;
fb5f0bc8 5602 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
5603 }
5604
fb5f0bc8 5605 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
5606}
5607
5608/*
428870ff 5609 * Update the stored path or FRU for this vdev.
34dc7c2f
BB
5610 */
5611int
9babb374
BB
5612spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5613 boolean_t ispath)
34dc7c2f 5614{
b128c09f 5615 vdev_t *vd;
428870ff 5616 boolean_t sync = B_FALSE;
34dc7c2f 5617
572e2857
BB
5618 ASSERT(spa_writeable(spa));
5619
428870ff 5620 spa_vdev_state_enter(spa, SCL_ALL);
34dc7c2f 5621
9babb374 5622 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
428870ff 5623 return (spa_vdev_state_exit(spa, NULL, ENOENT));
34dc7c2f
BB
5624
5625 if (!vd->vdev_ops->vdev_op_leaf)
428870ff 5626 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
34dc7c2f 5627
9babb374 5628 if (ispath) {
428870ff
BB
5629 if (strcmp(value, vd->vdev_path) != 0) {
5630 spa_strfree(vd->vdev_path);
5631 vd->vdev_path = spa_strdup(value);
5632 sync = B_TRUE;
5633 }
9babb374 5634 } else {
428870ff
BB
5635 if (vd->vdev_fru == NULL) {
5636 vd->vdev_fru = spa_strdup(value);
5637 sync = B_TRUE;
5638 } else if (strcmp(value, vd->vdev_fru) != 0) {
9babb374 5639 spa_strfree(vd->vdev_fru);
428870ff
BB
5640 vd->vdev_fru = spa_strdup(value);
5641 sync = B_TRUE;
5642 }
9babb374 5643 }
34dc7c2f 5644
428870ff 5645 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
34dc7c2f
BB
5646}
5647
9babb374
BB
5648int
5649spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5650{
5651 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5652}
5653
5654int
5655spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5656{
5657 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5658}
5659
34dc7c2f
BB
5660/*
5661 * ==========================================================================
428870ff 5662 * SPA Scanning
34dc7c2f
BB
5663 * ==========================================================================
5664 */
5665
34dc7c2f 5666int
428870ff
BB
5667spa_scan_stop(spa_t *spa)
5668{
5669 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5670 if (dsl_scan_resilvering(spa->spa_dsl_pool))
2e528b49 5671 return (SET_ERROR(EBUSY));
428870ff
BB
5672 return (dsl_scan_cancel(spa->spa_dsl_pool));
5673}
5674
5675int
5676spa_scan(spa_t *spa, pool_scan_func_t func)
34dc7c2f 5677{
b128c09f 5678 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
34dc7c2f 5679
428870ff 5680 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
2e528b49 5681 return (SET_ERROR(ENOTSUP));
34dc7c2f 5682
34dc7c2f 5683 /*
b128c09f
BB
5684 * If a resilver was requested, but there is no DTL on a
5685 * writeable leaf device, we have nothing to do.
34dc7c2f 5686 */
428870ff 5687 if (func == POOL_SCAN_RESILVER &&
b128c09f
BB
5688 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5689 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
34dc7c2f
BB
5690 return (0);
5691 }
5692
428870ff 5693 return (dsl_scan(spa->spa_dsl_pool, func));
34dc7c2f
BB
5694}
5695
5696/*
5697 * ==========================================================================
5698 * SPA async task processing
5699 * ==========================================================================
5700 */
5701
5702static void
5703spa_async_remove(spa_t *spa, vdev_t *vd)
5704{
d6320ddb
BB
5705 int c;
5706
b128c09f 5707 if (vd->vdev_remove_wanted) {
428870ff
BB
5708 vd->vdev_remove_wanted = B_FALSE;
5709 vd->vdev_delayed_close = B_FALSE;
b128c09f 5710 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
428870ff
BB
5711
5712 /*
5713 * We want to clear the stats, but we don't want to do a full
5714 * vdev_clear() as that will cause us to throw away
5715 * degraded/faulted state as well as attempt to reopen the
5716 * device, all of which is a waste.
5717 */
5718 vd->vdev_stat.vs_read_errors = 0;
5719 vd->vdev_stat.vs_write_errors = 0;
5720 vd->vdev_stat.vs_checksum_errors = 0;
5721
b128c09f
BB
5722 vdev_state_dirty(vd->vdev_top);
5723 }
34dc7c2f 5724
d6320ddb 5725 for (c = 0; c < vd->vdev_children; c++)
b128c09f
BB
5726 spa_async_remove(spa, vd->vdev_child[c]);
5727}
5728
5729static void
5730spa_async_probe(spa_t *spa, vdev_t *vd)
5731{
d6320ddb
BB
5732 int c;
5733
b128c09f 5734 if (vd->vdev_probe_wanted) {
428870ff 5735 vd->vdev_probe_wanted = B_FALSE;
b128c09f 5736 vdev_reopen(vd); /* vdev_open() does the actual probe */
34dc7c2f 5737 }
b128c09f 5738
d6320ddb 5739 for (c = 0; c < vd->vdev_children; c++)
b128c09f 5740 spa_async_probe(spa, vd->vdev_child[c]);
34dc7c2f
BB
5741}
5742
9babb374
BB
5743static void
5744spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5745{
d6320ddb 5746 int c;
9babb374
BB
5747
5748 if (!spa->spa_autoexpand)
5749 return;
5750
d6320ddb 5751 for (c = 0; c < vd->vdev_children; c++) {
9babb374
BB
5752 vdev_t *cvd = vd->vdev_child[c];
5753 spa_async_autoexpand(spa, cvd);
5754 }
5755
5756 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5757 return;
5758
26685276 5759 spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_AUTOEXPAND);
9babb374
BB
5760}
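/*
 * [Editorial sketch, not part of spa.c] spa_async_remove(),
 * spa_async_probe() and spa_async_autoexpand() all share one shape:
 * do per-vdev work, then recurse over vdev_child[]. A standalone
 * rendering with a toy tree type (all names hypothetical):
 */
#include <stdio.h>

typedef struct node {
    const char *name;
    struct node **child;
    int children;
} node_t;

static void
walk(node_t *n)
{
    int c;

    printf("visit %s\n", n->name);    /* per-node work goes here */
    for (c = 0; c < n->children; c++)
        walk(n->child[c]);
}

int
main(void)
{
    node_t leaf0 = { "disk0", NULL, 0 };
    node_t leaf1 = { "disk1", NULL, 0 };
    node_t *kids[] = { &leaf0, &leaf1 };
    node_t root = { "root-vdev", kids, 2 };

    walk(&root);
    return (0);
}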
5761
34dc7c2f
BB
5762static void
5763spa_async_thread(spa_t *spa)
5764{
d6320ddb 5765 int tasks, i;
34dc7c2f
BB
5766
5767 ASSERT(spa->spa_sync_on);
5768
5769 mutex_enter(&spa->spa_async_lock);
5770 tasks = spa->spa_async_tasks;
5771 spa->spa_async_tasks = 0;
5772 mutex_exit(&spa->spa_async_lock);
5773
5774 /*
5775 * See if the config needs to be updated.
5776 */
5777 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
428870ff 5778 uint64_t old_space, new_space;
9babb374 5779
34dc7c2f 5780 mutex_enter(&spa_namespace_lock);
428870ff 5781 old_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 5782 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
428870ff 5783 new_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 5784 mutex_exit(&spa_namespace_lock);
9babb374
BB
5785
5786 /*
5787 * If the pool grew as a result of the config update,
5788 * then log an internal history event.
5789 */
428870ff 5790 if (new_space != old_space) {
6f1ffb06 5791 spa_history_log_internal(spa, "vdev online", NULL,
45d1cae3 5792 "pool '%s' size: %llu(+%llu)",
428870ff 5793 spa_name(spa), new_space, new_space - old_space);
9babb374 5794 }
34dc7c2f
BB
5795 }
5796
5797 /*
5798 * See if any devices need to be marked REMOVED.
34dc7c2f 5799 */
b128c09f 5800 if (tasks & SPA_ASYNC_REMOVE) {
428870ff 5801 spa_vdev_state_enter(spa, SCL_NONE);
34dc7c2f 5802 spa_async_remove(spa, spa->spa_root_vdev);
d6320ddb 5803 for (i = 0; i < spa->spa_l2cache.sav_count; i++)
b128c09f 5804 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
d6320ddb 5805 for (i = 0; i < spa->spa_spares.sav_count; i++)
b128c09f
BB
5806 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5807 (void) spa_vdev_state_exit(spa, NULL, 0);
34dc7c2f
BB
5808 }
5809
9babb374
BB
5810 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5811 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5812 spa_async_autoexpand(spa, spa->spa_root_vdev);
5813 spa_config_exit(spa, SCL_CONFIG, FTAG);
5814 }
5815
34dc7c2f 5816 /*
b128c09f 5817 * See if any devices need to be probed.
34dc7c2f 5818 */
b128c09f 5819 if (tasks & SPA_ASYNC_PROBE) {
428870ff 5820 spa_vdev_state_enter(spa, SCL_NONE);
b128c09f
BB
5821 spa_async_probe(spa, spa->spa_root_vdev);
5822 (void) spa_vdev_state_exit(spa, NULL, 0);
5823 }
34dc7c2f
BB
5824
5825 /*
b128c09f 5826 * If any devices are done replacing, detach them.
34dc7c2f 5827 */
b128c09f
BB
5828 if (tasks & SPA_ASYNC_RESILVER_DONE)
5829 spa_vdev_resilver_done(spa);
34dc7c2f
BB
5830
5831 /*
5832 * Kick off a resilver.
5833 */
b128c09f 5834 if (tasks & SPA_ASYNC_RESILVER)
428870ff 5835 dsl_resilver_restart(spa->spa_dsl_pool, 0);
34dc7c2f
BB
5836
5837 /*
5838 * Let the world know that we're done.
5839 */
5840 mutex_enter(&spa->spa_async_lock);
5841 spa->spa_async_thread = NULL;
5842 cv_broadcast(&spa->spa_async_cv);
5843 mutex_exit(&spa->spa_async_lock);
5844 thread_exit();
5845}
5846
5847void
5848spa_async_suspend(spa_t *spa)
5849{
5850 mutex_enter(&spa->spa_async_lock);
5851 spa->spa_async_suspended++;
5852 while (spa->spa_async_thread != NULL)
5853 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5854 mutex_exit(&spa->spa_async_lock);
5855}
5856
5857void
5858spa_async_resume(spa_t *spa)
5859{
5860 mutex_enter(&spa->spa_async_lock);
5861 ASSERT(spa->spa_async_suspended != 0);
5862 spa->spa_async_suspended--;
5863 mutex_exit(&spa->spa_async_lock);
5864}
5865
5866static void
5867spa_async_dispatch(spa_t *spa)
5868{
5869 mutex_enter(&spa->spa_async_lock);
5870 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
5871 spa->spa_async_thread == NULL &&
5872 rootdir != NULL && !vn_is_readonly(rootdir))
5873 spa->spa_async_thread = thread_create(NULL, 0,
5874 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5875 mutex_exit(&spa->spa_async_lock);
5876}
5877
5878void
5879spa_async_request(spa_t *spa, int task)
5880{
428870ff 5881 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
34dc7c2f
BB
5882 mutex_enter(&spa->spa_async_lock);
5883 spa->spa_async_tasks |= task;
5884 mutex_exit(&spa->spa_async_lock);
5885}
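/*
 * [Editorial sketch, not part of spa.c] A minimal userland analogue of
 * the async-task handshake above: requesters OR bits into a pending mask
 * under a mutex, and the worker snapshots and clears the mask before
 * acting on it. Single-threaded here for brevity; all names below are
 * hypothetical. Compile with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define TASK_RESILVER    (1 << 0)
#define TASK_AUTOEXPAND  (1 << 1)

static pthread_mutex_t async_lock = PTHREAD_MUTEX_INITIALIZER;
static int async_tasks;

static void
async_request(int task)
{
    pthread_mutex_lock(&async_lock);
    async_tasks |= task;        /* like spa_async_request() */
    pthread_mutex_unlock(&async_lock);
}

static void
async_thread_body(void)
{
    int tasks;

    pthread_mutex_lock(&async_lock);
    tasks = async_tasks;        /* snapshot the pending work ... */
    async_tasks = 0;            /* ... and clear it, like spa_async_thread() */
    pthread_mutex_unlock(&async_lock);

    if (tasks & TASK_RESILVER)
        printf("would kick off a resilver\n");
    if (tasks & TASK_AUTOEXPAND)
        printf("would autoexpand\n");
}

int
main(void)
{
    async_request(TASK_RESILVER);
    async_request(TASK_AUTOEXPAND);
    async_thread_body();
    return (0);
}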
5886
5887/*
5888 * ==========================================================================
5889 * SPA syncing routines
5890 * ==========================================================================
5891 */
5892
428870ff
BB
5893static int
5894bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
34dc7c2f 5895{
428870ff
BB
5896 bpobj_t *bpo = arg;
5897 bpobj_enqueue(bpo, bp, tx);
5898 return (0);
5899}
34dc7c2f 5900
428870ff
BB
5901static int
5902spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5903{
5904 zio_t *zio = arg;
34dc7c2f 5905
428870ff
BB
5906 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5907 zio->io_flags));
5908 return (0);
34dc7c2f
BB
5909}
5910
e8b96c60
MA
5911/*
5912 * Note: this simple function is not inlined to make it easier to dtrace the
5913 * amount of time spent syncing frees.
5914 */
5915static void
5916spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
5917{
5918 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5919 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
5920 VERIFY(zio_wait(zio) == 0);
5921}
5922
5923/*
5924 * Note: this simple function is not inlined to make it easier to dtrace the
5925 * amount of time spent syncing deferred frees.
5926 */
5927static void
5928spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
5929{
5930 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5931 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
5932 spa_free_sync_cb, zio, tx), ==, 0);
5933 VERIFY0(zio_wait(zio));
5934}
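/*
 * [Editorial sketch, not part of spa.c] bpobj_enqueue_cb() and
 * spa_free_sync_cb() follow the usual ZFS iterator shape: the walker
 * owns the loop and hands each block pointer to a caller-supplied
 * callback together with an opaque argument. A minimal standalone
 * rendering of that shape (all names hypothetical):
 */
#include <stdio.h>

typedef struct blk { int id; } blk_t;

typedef int (*blk_cb_t)(void *arg, const blk_t *bp);

static int
list_iterate(const blk_t *blks, int n, blk_cb_t cb, void *arg)
{
    int i, err;

    for (i = 0; i < n; i++)
        if ((err = cb(arg, &blks[i])) != 0)
            return (err);    /* callbacks can abort the walk */
    return (0);
}

static int
print_cb(void *arg, const blk_t *bp)
{
    printf("%s block %d\n", (const char *)arg, bp->id);
    return (0);
}

int
main(void)
{
    blk_t blks[] = { { 1 }, { 2 }, { 3 } };

    return (list_iterate(blks, 3, print_cb, "freeing"));
}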
5935
34dc7c2f
BB
5936static void
5937spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5938{
5939 char *packed = NULL;
b128c09f 5940 size_t bufsize;
34dc7c2f
BB
5941 size_t nvsize = 0;
5942 dmu_buf_t *db;
5943
5944 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5945
b128c09f
BB
5946 /*
5947 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
b0bc7a84 5948 * information. This avoids the dmu_buf_will_dirty() path and
b128c09f
BB
5949 * saves us a pre-read to get data we don't actually care about.
5950 */
9ae529ec 5951 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
79c76d5b 5952 packed = vmem_alloc(bufsize, KM_SLEEP);
34dc7c2f
BB
5953
5954 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
79c76d5b 5955 KM_SLEEP) == 0);
b128c09f 5956 bzero(packed + nvsize, bufsize - nvsize);
34dc7c2f 5957
b128c09f 5958 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
34dc7c2f 5959
00b46022 5960 vmem_free(packed, bufsize);
34dc7c2f
BB
5961
5962 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5963 dmu_buf_will_dirty(db, tx);
5964 *(uint64_t *)db->db_data = nvsize;
5965 dmu_buf_rele(db, FTAG);
5966}
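/*
 * [Editorial sketch, not part of spa.c] The buffer sizing above rounds
 * the packed nvlist size up to whole SPA_CONFIG_BLOCKSIZE blocks. A
 * standalone demonstration of the P2ROUNDUP() arithmetic, with the
 * macro copied from the usual sysmacros definition; the nvsize value
 * is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

/* Same definition as the illumos/ZFS P2ROUNDUP() macro. */
#define P2ROUNDUP(x, align)    (-(-(uint64_t)(x) & -(uint64_t)(align)))

int
main(void)
{
    /* SPA_CONFIG_BLOCKSIZE is 1 << 14 (16 KiB). */
    uint64_t blocksize = 1ULL << 14;
    uint64_t nvsize = 37000;    /* hypothetical packed-nvlist size */

    printf("%llu -> %llu\n",
        (unsigned long long)nvsize,
        (unsigned long long)P2ROUNDUP(nvsize, blocksize));
    /* prints: 37000 -> 49152 (three full 16 KiB blocks) */
    return (0);
}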
5967
5968static void
5969spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5970 const char *config, const char *entry)
5971{
5972 nvlist_t *nvroot;
5973 nvlist_t **list;
5974 int i;
5975
5976 if (!sav->sav_sync)
5977 return;
5978
5979 /*
5980 * Update the MOS nvlist describing the list of available devices.
5981 * spa_validate_aux() will have already made sure this nvlist is
5982 * valid and the vdevs are labeled appropriately.
5983 */
5984 if (sav->sav_object == 0) {
5985 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5986 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5987 sizeof (uint64_t), tx);
5988 VERIFY(zap_update(spa->spa_meta_objset,
5989 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5990 &sav->sav_object, tx) == 0);
5991 }
5992
79c76d5b 5993 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
34dc7c2f
BB
5994 if (sav->sav_count == 0) {
5995 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5996 } else {
79c76d5b 5997 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
34dc7c2f
BB
5998 for (i = 0; i < sav->sav_count; i++)
5999 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
428870ff 6000 B_FALSE, VDEV_CONFIG_L2CACHE);
34dc7c2f
BB
6001 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6002 sav->sav_count) == 0);
6003 for (i = 0; i < sav->sav_count; i++)
6004 nvlist_free(list[i]);
6005 kmem_free(list, sav->sav_count * sizeof (void *));
6006 }
6007
6008 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6009 nvlist_free(nvroot);
6010
6011 sav->sav_sync = B_FALSE;
6012}
6013
6014static void
6015spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6016{
6017 nvlist_t *config;
6018
b128c09f 6019 if (list_is_empty(&spa->spa_config_dirty_list))
34dc7c2f
BB
6020 return;
6021
b128c09f
BB
6022 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6023
6024 config = spa_config_generate(spa, spa->spa_root_vdev,
6025 dmu_tx_get_txg(tx), B_FALSE);
6026
ea0b2538
GW
6027 /*
6028 * If we're upgrading the spa version then make sure that
6029 * the config object gets updated with the correct version.
6030 */
6031 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6032 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6033 spa->spa_uberblock.ub_version);
6034
b128c09f 6035 spa_config_exit(spa, SCL_STATE, FTAG);
34dc7c2f
BB
6036
6037 if (spa->spa_config_syncing)
6038 nvlist_free(spa->spa_config_syncing);
6039 spa->spa_config_syncing = config;
6040
6041 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6042}
6043
9ae529ec 6044static void
13fe0198 6045spa_sync_version(void *arg, dmu_tx_t *tx)
9ae529ec 6046{
13fe0198
MA
6047 uint64_t *versionp = arg;
6048 uint64_t version = *versionp;
6049 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9ae529ec
CS
6050
6051 /*
6052 * Setting the version is special cased when first creating the pool.
6053 */
6054 ASSERT(tx->tx_txg != TXG_INITIAL);
6055
8dca0a9a 6056 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
9ae529ec
CS
6057 ASSERT(version >= spa_version(spa));
6058
6059 spa->spa_uberblock.ub_version = version;
6060 vdev_config_dirty(spa->spa_root_vdev);
6f1ffb06 6061 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
9ae529ec
CS
6062}
6063
34dc7c2f
BB
6064/*
6065 * Set zpool properties.
6066 */
6067static void
13fe0198 6068spa_sync_props(void *arg, dmu_tx_t *tx)
34dc7c2f 6069{
13fe0198
MA
6070 nvlist_t *nvp = arg;
6071 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
34dc7c2f 6072 objset_t *mos = spa->spa_meta_objset;
9ae529ec 6073 nvpair_t *elem = NULL;
b128c09f
BB
6074
6075 mutex_enter(&spa->spa_props_lock);
34dc7c2f 6076
34dc7c2f 6077 while ((elem = nvlist_next_nvpair(nvp, elem))) {
9ae529ec
CS
6078 uint64_t intval;
6079 char *strval, *fname;
6080 zpool_prop_t prop;
6081 const char *propname;
6082 zprop_type_t proptype;
fa86b5db 6083 spa_feature_t fid;
9ae529ec
CS
6084
6085 prop = zpool_name_to_prop(nvpair_name(elem));
6086 switch ((int)prop) {
6087 case ZPROP_INVAL:
6088 /*
6089 * We checked this earlier in spa_prop_validate().
6090 */
6091 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6092
6093 fname = strchr(nvpair_name(elem), '@') + 1;
fa86b5db 6094 VERIFY0(zfeature_lookup_name(fname, &fid));
9ae529ec 6095
fa86b5db 6096 spa_feature_enable(spa, fid, tx);
6f1ffb06
MA
6097 spa_history_log_internal(spa, "set", tx,
6098 "%s=enabled", nvpair_name(elem));
9ae529ec
CS
6099 break;
6100
34dc7c2f 6101 case ZPOOL_PROP_VERSION:
93cf2076 6102 intval = fnvpair_value_uint64(elem);
34dc7c2f 6103 /*
9ae529ec
CS
6104 * The version is synced separately before other
6105 * properties and should be correct by now.
34dc7c2f 6106 */
9ae529ec 6107 ASSERT3U(spa_version(spa), >=, intval);
34dc7c2f
BB
6108 break;
6109
6110 case ZPOOL_PROP_ALTROOT:
6111 /*
6112 * 'altroot' is a non-persistent property. It should
6113 * have been set temporarily at creation or import time.
6114 */
6115 ASSERT(spa->spa_root != NULL);
6116 break;
6117
572e2857 6118 case ZPOOL_PROP_READONLY:
34dc7c2f
BB
6119 case ZPOOL_PROP_CACHEFILE:
6120 /*
572e2857
BB
6121 * 'readonly' and 'cachefile' are also non-persistent
6122 * properties.
34dc7c2f 6123 */
34dc7c2f 6124 break;
d96eb2b1 6125 case ZPOOL_PROP_COMMENT:
93cf2076 6126 strval = fnvpair_value_string(elem);
d96eb2b1
DM
6127 if (spa->spa_comment != NULL)
6128 spa_strfree(spa->spa_comment);
6129 spa->spa_comment = spa_strdup(strval);
6130 /*
6131 * We need to dirty the configuration on all the vdevs
6132 * so that their labels get updated. It's unnecessary
6133 * to do this for pool creation since the vdev's
6134 * configuration has already been dirtied.
6135 */
6136 if (tx->tx_txg != TXG_INITIAL)
6137 vdev_config_dirty(spa->spa_root_vdev);
6f1ffb06
MA
6138 spa_history_log_internal(spa, "set", tx,
6139 "%s=%s", nvpair_name(elem), strval);
d96eb2b1 6140 break;
34dc7c2f
BB
6141 default:
6142 /*
6143 * Set pool property values in the poolprops mos object.
6144 */
34dc7c2f 6145 if (spa->spa_pool_props_object == 0) {
9ae529ec
CS
6146 spa->spa_pool_props_object =
6147 zap_create_link(mos, DMU_OT_POOL_PROPS,
34dc7c2f 6148 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
9ae529ec 6149 tx);
34dc7c2f 6150 }
34dc7c2f
BB
6151
6152 /* normalize the property name */
6153 propname = zpool_prop_to_name(prop);
6154 proptype = zpool_prop_get_type(prop);
6155
6156 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6157 ASSERT(proptype == PROP_TYPE_STRING);
93cf2076
GW
6158 strval = fnvpair_value_string(elem);
6159 VERIFY0(zap_update(mos,
34dc7c2f 6160 spa->spa_pool_props_object, propname,
93cf2076 6161 1, strlen(strval) + 1, strval, tx));
6f1ffb06
MA
6162 spa_history_log_internal(spa, "set", tx,
6163 "%s=%s", nvpair_name(elem), strval);
34dc7c2f 6164 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
93cf2076 6165 intval = fnvpair_value_uint64(elem);
34dc7c2f
BB
6166
6167 if (proptype == PROP_TYPE_INDEX) {
6168 const char *unused;
93cf2076
GW
6169 VERIFY0(zpool_prop_index_to_string(
6170 prop, intval, &unused));
34dc7c2f 6171 }
93cf2076 6172 VERIFY0(zap_update(mos,
34dc7c2f 6173 spa->spa_pool_props_object, propname,
93cf2076 6174 8, 1, &intval, tx));
6f1ffb06
MA
6175 spa_history_log_internal(spa, "set", tx,
6176 "%s=%lld", nvpair_name(elem), intval);
34dc7c2f
BB
6177 } else {
6178 ASSERT(0); /* not allowed */
6179 }
6180
6181 switch (prop) {
6182 case ZPOOL_PROP_DELEGATION:
6183 spa->spa_delegation = intval;
6184 break;
6185 case ZPOOL_PROP_BOOTFS:
6186 spa->spa_bootfs = intval;
6187 break;
6188 case ZPOOL_PROP_FAILUREMODE:
6189 spa->spa_failmode = intval;
6190 break;
9babb374
BB
6191 case ZPOOL_PROP_AUTOEXPAND:
6192 spa->spa_autoexpand = intval;
428870ff
BB
6193 if (tx->tx_txg != TXG_INITIAL)
6194 spa_async_request(spa,
6195 SPA_ASYNC_AUTOEXPAND);
6196 break;
6197 case ZPOOL_PROP_DEDUPDITTO:
6198 spa->spa_dedup_ditto = intval;
9babb374 6199 break;
34dc7c2f
BB
6200 default:
6201 break;
6202 }
6203 }
6204
34dc7c2f 6205 }
b128c09f
BB
6206
6207 mutex_exit(&spa->spa_props_lock);
34dc7c2f
BB
6208}
6209
428870ff
BB
6210/*
6211 * Perform one-time upgrade on-disk changes. spa_version() does not
6212 * reflect the new version this txg, so there must be no changes this
6213 * txg to anything that the upgrade code depends on after it executes.
6214 * Therefore this must be called after dsl_pool_sync() does the sync
6215 * tasks.
6216 */
6217static void
6218spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6219{
6220 dsl_pool_t *dp = spa->spa_dsl_pool;
6221
6222 ASSERT(spa->spa_sync_pass == 1);
6223
13fe0198
MA
6224 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6225
428870ff
BB
6226 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6227 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6228 dsl_pool_create_origin(dp, tx);
6229
6230 /* Keeping the origin open increases spa_minref */
6231 spa->spa_minref += 3;
6232 }
6233
6234 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6235 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6236 dsl_pool_upgrade_clones(dp, tx);
6237 }
6238
6239 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6240 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6241 dsl_pool_upgrade_dir_clones(dp, tx);
6242
6243 /* Keeping the freedir open increases spa_minref */
6244 spa->spa_minref += 3;
6245 }
9ae529ec
CS
6246
6247 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6248 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6249 spa_feature_create_zap_objects(spa, tx);
6250 }
62bdd5eb
DL
6251
6252 /*
6253 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
6254 * when the ability to use lz4 compression for metadata was added.
6255 * Old pools that have this feature enabled must be upgraded to have
6256 * this feature active.
6257 */
6258 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6259 boolean_t lz4_en = spa_feature_is_enabled(spa,
6260 SPA_FEATURE_LZ4_COMPRESS);
6261 boolean_t lz4_ac = spa_feature_is_active(spa,
6262 SPA_FEATURE_LZ4_COMPRESS);
6263
6264 if (lz4_en && !lz4_ac)
6265 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6266 }
13fe0198 6267 rrw_exit(&dp->dp_config_rwlock, FTAG);
428870ff
BB
6268}
6269
34dc7c2f
BB
6270/*
6271 * Sync the specified transaction group. New blocks may be dirtied as
6272 * part of the process, so we iterate until it converges.
6273 */
6274void
6275spa_sync(spa_t *spa, uint64_t txg)
6276{
6277 dsl_pool_t *dp = spa->spa_dsl_pool;
6278 objset_t *mos = spa->spa_meta_objset;
428870ff 6279 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
34dc7c2f
BB
6280 vdev_t *rvd = spa->spa_root_vdev;
6281 vdev_t *vd;
34dc7c2f 6282 dmu_tx_t *tx;
b128c09f 6283 int error;
d6320ddb 6284 int c;
34dc7c2f 6285
572e2857
BB
6286 VERIFY(spa_writeable(spa));
6287
34dc7c2f
BB
6288 /*
6289 * Lock out configuration changes.
6290 */
b128c09f 6291 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f
BB
6292
6293 spa->spa_syncing_txg = txg;
6294 spa->spa_sync_pass = 0;
6295
b128c09f
BB
6296 /*
6297 * If there are any pending vdev state changes, convert them
6298 * into config changes that go out with this transaction group.
6299 */
6300 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
fb5f0bc8
BB
6301 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6302 /*
6303 * We need the write lock here because, for aux vdevs,
6304 * calling vdev_config_dirty() modifies sav_config.
6305 * This is ugly and will become unnecessary when we
6306 * eliminate the aux vdev wart by integrating all vdevs
6307 * into the root vdev tree.
6308 */
6309 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6310 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6311 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6312 vdev_state_clean(vd);
6313 vdev_config_dirty(vd);
6314 }
6315 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6316 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
b128c09f
BB
6317 }
6318 spa_config_exit(spa, SCL_STATE, FTAG);
6319
34dc7c2f
BB
6320 tx = dmu_tx_create_assigned(dp, txg);
6321
cc92e9d0
GW
6322 spa->spa_sync_starttime = gethrtime();
6323 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
6324 spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
79c76d5b 6325 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
cc92e9d0
GW
6326 NSEC_TO_TICK(spa->spa_deadman_synctime));
6327
34dc7c2f
BB
6328 /*
6329 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6330 * set spa_deflate if we have no raid-z vdevs.
6331 */
6332 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6333 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6334 int i;
6335
6336 for (i = 0; i < rvd->vdev_children; i++) {
6337 vd = rvd->vdev_child[i];
6338 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6339 break;
6340 }
6341 if (i == rvd->vdev_children) {
6342 spa->spa_deflate = TRUE;
6343 VERIFY(0 == zap_add(spa->spa_meta_objset,
6344 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6345 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6346 }
6347 }
6348
6349 /*
428870ff
BB
6350 * If anything has changed in this txg, or if someone is waiting
6351 * for this txg to sync (eg, spa_vdev_remove()), push the
6352 * deferred frees from the previous txg. If not, leave them
6353 * alone so that we don't generate work on an otherwise idle
6354 * system.
34dc7c2f
BB
6355 */
6356 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
6357 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
428870ff
BB
6358 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
6359 ((dsl_scan_active(dp->dp_scan) ||
6360 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
e8b96c60 6361 spa_sync_deferred_frees(spa, tx);
428870ff 6362 }
34dc7c2f
BB
6363
6364 /*
6365 * Iterate to convergence.
6366 */
6367 do {
428870ff 6368 int pass = ++spa->spa_sync_pass;
34dc7c2f
BB
6369
6370 spa_sync_config_object(spa, tx);
6371 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6372 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6373 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6374 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6375 spa_errlog_sync(spa, txg);
6376 dsl_pool_sync(dp, txg);
6377
55d85d5a 6378 if (pass < zfs_sync_pass_deferred_free) {
e8b96c60 6379 spa_sync_frees(spa, free_bpl, tx);
428870ff
BB
6380 } else {
6381 bplist_iterate(free_bpl, bpobj_enqueue_cb,
e8b96c60 6382 &spa->spa_deferred_bpobj, tx);
34dc7c2f
BB
6383 }
6384
428870ff
BB
6385 ddt_sync(spa, txg);
6386 dsl_scan_sync(dp, tx);
34dc7c2f 6387
c65aa5b2 6388 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
428870ff
BB
6389 vdev_sync(vd, txg);
6390
6391 if (pass == 1)
6392 spa_sync_upgrades(spa, tx);
34dc7c2f 6393
428870ff 6394 } while (dmu_objset_is_dirty(mos, txg));
34dc7c2f
BB
6395
6396 /*
6397 * Rewrite the vdev configuration (which includes the uberblock)
6398 * to commit the transaction group.
6399 *
6400 * If there are no dirty vdevs, we sync the uberblock to a few
6401 * random top-level vdevs that are known to be visible in the
b128c09f
BB
6402 * config cache (see spa_vdev_add() for a complete description).
6403 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
34dc7c2f 6404 */
b128c09f
BB
6405 for (;;) {
6406 /*
6407 * We hold SCL_STATE to prevent vdev open/close/etc.
6408 * while we're attempting to write the vdev labels.
6409 */
6410 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6411
6412 if (list_is_empty(&spa->spa_config_dirty_list)) {
6413 vdev_t *svd[SPA_DVAS_PER_BP];
6414 int svdcount = 0;
6415 int children = rvd->vdev_children;
6416 int c0 = spa_get_random(children);
b128c09f 6417
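            /*
             * [Editorial note] Walking (c0 + c) % children from a
             * random start visits each top-level vdev at most once
             * while varying, txg to txg, which vdevs receive the
             * uberblock when none are dirty.
             */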
d6320ddb 6418 for (c = 0; c < children; c++) {
b128c09f
BB
6419 vd = rvd->vdev_child[(c0 + c) % children];
6420 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6421 continue;
6422 svd[svdcount++] = vd;
6423 if (svdcount == SPA_DVAS_PER_BP)
6424 break;
6425 }
9babb374
BB
6426 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6427 if (error != 0)
6428 error = vdev_config_sync(svd, svdcount, txg,
6429 B_TRUE);
b128c09f
BB
6430 } else {
6431 error = vdev_config_sync(rvd->vdev_child,
9babb374
BB
6432 rvd->vdev_children, txg, B_FALSE);
6433 if (error != 0)
6434 error = vdev_config_sync(rvd->vdev_child,
6435 rvd->vdev_children, txg, B_TRUE);
34dc7c2f 6436 }
34dc7c2f 6437
3bc7e0fb
GW
6438 if (error == 0)
6439 spa->spa_last_synced_guid = rvd->vdev_guid;
6440
b128c09f
BB
6441 spa_config_exit(spa, SCL_STATE, FTAG);
6442
6443 if (error == 0)
6444 break;
6445 zio_suspend(spa, NULL);
6446 zio_resume_wait(spa);
6447 }
34dc7c2f
BB
6448 dmu_tx_commit(tx);
6449
cc92e9d0
GW
6450 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
6451 spa->spa_deadman_tqid = 0;
6452
34dc7c2f
BB
6453 /*
6454 * Clear the dirty config list.
6455 */
b128c09f 6456 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
34dc7c2f
BB
6457 vdev_config_clean(vd);
6458
6459 /*
6460 * Now that the new config has synced transactionally,
6461 * let it become visible to the config cache.
6462 */
6463 if (spa->spa_config_syncing != NULL) {
6464 spa_config_set(spa, spa->spa_config_syncing);
6465 spa->spa_config_txg = txg;
6466 spa->spa_config_syncing = NULL;
6467 }
6468
34dc7c2f 6469 spa->spa_ubsync = spa->spa_uberblock;
34dc7c2f 6470
428870ff 6471 dsl_pool_sync_done(dp, txg);
34dc7c2f
BB
6472
6473 /*
6474 * Update usable space statistics.
6475 */
c65aa5b2 6476 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
34dc7c2f
BB
6477 vdev_sync_done(vd, txg);
6478
428870ff
BB
6479 spa_update_dspace(spa);
6480
34dc7c2f
BB
6481 /*
6482 * It had better be the case that we didn't dirty anything
6483 * since vdev_config_sync().
6484 */
6485 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
6486 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6487 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
428870ff
BB
6488
6489 spa->spa_sync_pass = 0;
34dc7c2f 6490
b128c09f 6491 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f 6492
428870ff
BB
6493 spa_handle_ignored_writes(spa);
6494
34dc7c2f
BB
6495 /*
6496 * If any async tasks have been requested, kick them off.
6497 */
6498 spa_async_dispatch(spa);
6499}
6500
6501/*
6502 * Sync all pools. We don't want to hold the namespace lock across these
6503 * operations, so we take a reference on the spa_t and drop the lock during the
6504 * sync.
6505 */
6506void
6507spa_sync_allpools(void)
6508{
6509 spa_t *spa = NULL;
6510 mutex_enter(&spa_namespace_lock);
6511 while ((spa = spa_next(spa)) != NULL) {
572e2857
BB
6512 if (spa_state(spa) != POOL_STATE_ACTIVE ||
6513 !spa_writeable(spa) || spa_suspended(spa))
34dc7c2f
BB
6514 continue;
6515 spa_open_ref(spa, FTAG);
6516 mutex_exit(&spa_namespace_lock);
6517 txg_wait_synced(spa_get_dsl(spa), 0);
6518 mutex_enter(&spa_namespace_lock);
6519 spa_close(spa, FTAG);
6520 }
6521 mutex_exit(&spa_namespace_lock);
6522}
6523
6524/*
6525 * ==========================================================================
6526 * Miscellaneous routines
6527 * ==========================================================================
6528 */
6529
6530/*
6531 * Remove all pools in the system.
6532 */
6533void
6534spa_evict_all(void)
6535{
6536 spa_t *spa;
6537
6538 /*
6539 * Remove all cached state. All pools should be closed now,
6540 * so every spa in the AVL tree should be unreferenced.
6541 */
6542 mutex_enter(&spa_namespace_lock);
6543 while ((spa = spa_next(NULL)) != NULL) {
6544 /*
6545 * Stop async tasks. The async thread may need to detach
6546 * a device that's been replaced, which requires grabbing
6547 * spa_namespace_lock, so we must drop it here.
6548 */
6549 spa_open_ref(spa, FTAG);
6550 mutex_exit(&spa_namespace_lock);
6551 spa_async_suspend(spa);
6552 mutex_enter(&spa_namespace_lock);
34dc7c2f
BB
6553 spa_close(spa, FTAG);
6554
6555 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6556 spa_unload(spa);
6557 spa_deactivate(spa);
6558 }
6559 spa_remove(spa);
6560 }
6561 mutex_exit(&spa_namespace_lock);
6562}
6563
6564vdev_t *
9babb374 6565spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
34dc7c2f 6566{
b128c09f
BB
6567 vdev_t *vd;
6568 int i;
6569
6570 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
6571 return (vd);
6572
9babb374 6573 if (aux) {
b128c09f
BB
6574 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
6575 vd = spa->spa_l2cache.sav_vdevs[i];
9babb374
BB
6576 if (vd->vdev_guid == guid)
6577 return (vd);
6578 }
6579
6580 for (i = 0; i < spa->spa_spares.sav_count; i++) {
6581 vd = spa->spa_spares.sav_vdevs[i];
b128c09f
BB
6582 if (vd->vdev_guid == guid)
6583 return (vd);
6584 }
6585 }
6586
6587 return (NULL);
34dc7c2f
BB
6588}
6589
6590void
6591spa_upgrade(spa_t *spa, uint64_t version)
6592{
572e2857
BB
6593 ASSERT(spa_writeable(spa));
6594
b128c09f 6595 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
6596
6597 /*
6598 * This should only be called for a non-faulted pool, and since a
6599 * future version would result in an unopenable pool, this shouldn't be
6600 * possible.
6601 */
8dca0a9a 6602 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
9b67f605 6603 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
34dc7c2f
BB
6604
6605 spa->spa_uberblock.ub_version = version;
6606 vdev_config_dirty(spa->spa_root_vdev);
6607
b128c09f 6608 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
6609
6610 txg_wait_synced(spa_get_dsl(spa), 0);
6611}
6612
6613boolean_t
6614spa_has_spare(spa_t *spa, uint64_t guid)
6615{
6616 int i;
6617 uint64_t spareguid;
6618 spa_aux_vdev_t *sav = &spa->spa_spares;
6619
6620 for (i = 0; i < sav->sav_count; i++)
6621 if (sav->sav_vdevs[i]->vdev_guid == guid)
6622 return (B_TRUE);
6623
6624 for (i = 0; i < sav->sav_npending; i++) {
6625 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6626 &spareguid) == 0 && spareguid == guid)
6627 return (B_TRUE);
6628 }
6629
6630 return (B_FALSE);
6631}
6632
b128c09f
BB
6633/*
6634 * Check if a pool has an active shared spare device.
6635 * Note: the reference count of an active spare is 2: as a spare and as a replacement
6636 */
6637static boolean_t
6638spa_has_active_shared_spare(spa_t *spa)
6639{
6640 int i, refcnt;
6641 uint64_t pool;
6642 spa_aux_vdev_t *sav = &spa->spa_spares;
6643
6644 for (i = 0; i < sav->sav_count; i++) {
6645 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6646 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6647 refcnt > 2)
6648 return (B_TRUE);
6649 }
6650
6651 return (B_FALSE);
6652}
6653
34dc7c2f 6654/*
26685276 6655 * Post a FM_EREPORT_ZFS_* event from sys/fm/fs/zfs.h. The payload will be
34dc7c2f
BB
6656 * filled in from the spa and (optionally) the vdev. This doesn't do anything
6657 * in the userland libzpool, as we don't want consumers to misinterpret ztest
6658 * or zdb as real changes.
6659 */
6660void
6661spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6662{
6663#ifdef _KERNEL
26685276 6664 zfs_ereport_post(name, spa, vd, NULL, 0, 0);
34dc7c2f
BB
6665#endif
6666}
c28b2279
BB
6667
6668#if defined(_KERNEL) && defined(HAVE_SPL)
6669/* state manipulation functions */
6670EXPORT_SYMBOL(spa_open);
6671EXPORT_SYMBOL(spa_open_rewind);
6672EXPORT_SYMBOL(spa_get_stats);
6673EXPORT_SYMBOL(spa_create);
6674EXPORT_SYMBOL(spa_import_rootpool);
6675EXPORT_SYMBOL(spa_import);
6676EXPORT_SYMBOL(spa_tryimport);
6677EXPORT_SYMBOL(spa_destroy);
6678EXPORT_SYMBOL(spa_export);
6679EXPORT_SYMBOL(spa_reset);
6680EXPORT_SYMBOL(spa_async_request);
6681EXPORT_SYMBOL(spa_async_suspend);
6682EXPORT_SYMBOL(spa_async_resume);
6683EXPORT_SYMBOL(spa_inject_addref);
6684EXPORT_SYMBOL(spa_inject_delref);
6685EXPORT_SYMBOL(spa_scan_stat_init);
6686EXPORT_SYMBOL(spa_scan_get_stats);
6687
6688/* device manipulation */
6689EXPORT_SYMBOL(spa_vdev_add);
6690EXPORT_SYMBOL(spa_vdev_attach);
6691EXPORT_SYMBOL(spa_vdev_detach);
6692EXPORT_SYMBOL(spa_vdev_remove);
6693EXPORT_SYMBOL(spa_vdev_setpath);
6694EXPORT_SYMBOL(spa_vdev_setfru);
6695EXPORT_SYMBOL(spa_vdev_split_mirror);
6696
6697/* spare state (which is global across all pools) */
6698EXPORT_SYMBOL(spa_spare_add);
6699EXPORT_SYMBOL(spa_spare_remove);
6700EXPORT_SYMBOL(spa_spare_exists);
6701EXPORT_SYMBOL(spa_spare_activate);
6702
6703/* L2ARC state (which is global across all pools) */
6704EXPORT_SYMBOL(spa_l2cache_add);
6705EXPORT_SYMBOL(spa_l2cache_remove);
6706EXPORT_SYMBOL(spa_l2cache_exists);
6707EXPORT_SYMBOL(spa_l2cache_activate);
6708EXPORT_SYMBOL(spa_l2cache_drop);
6709
6710/* scanning */
6711EXPORT_SYMBOL(spa_scan);
6712EXPORT_SYMBOL(spa_scan_stop);
6713
6714/* spa syncing */
6715EXPORT_SYMBOL(spa_sync); /* only for DMU use */
6716EXPORT_SYMBOL(spa_sync_allpools);
6717
6718/* properties */
6719EXPORT_SYMBOL(spa_prop_set);
6720EXPORT_SYMBOL(spa_prop_get);
6721EXPORT_SYMBOL(spa_prop_clear_bootfs);
6722
6723/* asynchronous event notification */
6724EXPORT_SYMBOL(spa_event_notify);
6725#endif
dea377c0
MA
6726
6727#if defined(_KERNEL) && defined(HAVE_SPL)
6728module_param(spa_load_verify_maxinflight, int, 0644);
6729MODULE_PARM_DESC(spa_load_verify_maxinflight,
6730 "Max concurrent traversal I/Os while verifying pool during import -X");
6731
6732module_param(spa_load_verify_metadata, int, 0644);
6733MODULE_PARM_DESC(spa_load_verify_metadata,
6734 "Set to traverse metadata on pool import");
6735
6736module_param(spa_load_verify_data, int, 0644);
6737MODULE_PARM_DESC(spa_load_verify_data,
6738 "Set to traverse data on pool import");
6739#endif