/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2017 Joyent, Inc.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>

#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif /* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_P(12, 8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

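/*
 * Editorial note (not upstream text): reading the table above, the READ
 * row's INTR column is ZTI_P(12, 8), i.e. eight discrete taskqs of twelve
 * threads each service read interrupts, while the WRITE row's ZTI_BATCH
 * issue entry sizes a single throughput-oriented taskq as a percentage of
 * online CPUs (zio_taskq_batch_pct, 75 by default).
 */
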
static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
    const char *name);
static void spa_event_post(sysevent_t *ev);
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t zio_taskq_psrset_bind = PS_NONE;
boolean_t zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t spa_create_process = B_TRUE;	/* no process ==> no sysdc */

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

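/*
 * Editorial note (not upstream text): spa_prop_add_list() covers both the
 * numeric and the string cases, e.g.
 *	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, ZPROP_SRC_NONE);
 *	spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment, 0,
 *	    ZPROP_SRC_LOCAL);
 * Both patterns appear in spa_prop_get_config() below; when strval is
 * non-NULL the intval argument is ignored.
 */
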
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	const zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
			    version, ZPROP_SRC_DEFAULT);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
			    version, ZPROP_SRC_LOCAL);
		}
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools before this version, freedir will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
	if (err)
		return (err);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more props to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		goto out;
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if ((err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds))) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

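/*
 * Editorial note (not upstream text): a sketch of a typical consumer of
 * spa_prop_get(); the returned nvlist is owned by the caller and can be
 * walked with the generic nvpair API:
 *	nvlist_t *nvp = NULL;
 *	if (spa_prop_get(spa, &nvp) == 0) {
 *		nvpair_t *elem = NULL;
 *		while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL)
 *			... inspect nvpair_name(elem) ...
 *		nvlist_free(nvp);
 *	}
 */
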
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch ((int)prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_MULTIHOST:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

			if (!error && !spa_get_hostid())
				error = SET_ERROR(ENOTSUP);

			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t propval;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				error = dmu_objset_hold(strval, FTAG, &os);
				if (error)
					break;

				/*
				 * Must be ZPL, and its property settings
				 * must be supported by GRUB (compression
				 * is not gzip, and large blocks or large
				 * dnodes are not used).
				 */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &propval)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(propval)) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_DNODESIZE),
				    &propval)) == 0 &&
				    propval != ZFS_DNSIZE_LEGACY) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = SET_ERROR(E2BIG);
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;

		default:
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

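/*
 * Editorial note (not upstream text): a sketch of a caller setting a single
 * boolean-valued pool property through this interface:
 *	nvlist_t *props;
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND), 1) == 0);
 *	error = spa_prop_set(spa, props);
 *	nvlist_free(props);
 * Validation happens up front in spa_prop_validate(); the write itself is a
 * dsl_sync_task running spa_sync_props() in syncing context.
 */
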
/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;
	ASSERTV(uint64_t *newguid = arg);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

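/*
 * Editorial note (not upstream text): the change is implemented as the
 * check/sync pair handed to dsl_sync_task() above; spa_change_guid_check()
 * runs first and refuses the change unless the root vdev is healthy, and
 * only then does spa_change_guid_sync() rewrite the GUIDs atomically with
 * the transaction group.
 */
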
/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
	int ret;

	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	return (AVL_ISIGN(ret));
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t i, flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		flags |= TASKQ_DYNAMIC;
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = MIN(zio_taskq_batch_pct, 100);
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly less important
			 * priority than the other taskqs.  Under Linux this
			 * means incrementing the priority value; on platforms
			 * like illumos it should instead be decremented.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri++;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	uint_t i;

	if (tqs->stqs_taskq == NULL) {
		ASSERT3U(tqs->stqs_count, ==, 0);
		return;
	}

	for (i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

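/*
 * Editorial note (not upstream text): a hypothetical dispatch of a read
 * interrupt would look like
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_READ, ZIO_TASKQ_INTERRUPT,
 *	    func, arg, flags, ent);
 * which, given the ZTI_P(12, 8) entry in zio_taskqs above, lands on one of
 * eight discrete taskqs selected from the low bits of gethrtime().
 */
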
/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;
	taskqid_t id;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	id = taskq_dispatch(tq, func, arg, flags);
	if (id)
		taskq_wait_id(tq, id);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	int t, q;

	for (t = 0; t < ZIO_TYPES; t++) {
		for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

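/*
 * Editorial note (not upstream text): the covering process walks a small
 * state machine visible in the code above and in spa_deactivate():
 *	SPA_PROC_NONE -> SPA_PROC_CREATED	(spa_activate)
 *	SPA_PROC_CREATED -> SPA_PROC_ACTIVE	(spa_thread)
 *	SPA_PROC_ACTIVE -> SPA_PROC_DEACTIVATE	(spa_deactivate)
 *	SPA_PROC_DEACTIVATE -> SPA_PROC_GONE	(spa_thread)
 *	SPA_PROC_GONE -> SPA_PROC_NONE		(spa_deactivate)
 */
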
/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef HAVE_SPA_THREAD
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif /* HAVE_SPA_THREAD */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list, spa,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));

	/*
	 * This taskq is used to perform zvol-minor-related tasks
	 * asynchronously. This has several advantages, including easy
	 * resolution of various deadlocks (zfsonlinux bug #3681).
	 *
	 * The taskq must be single threaded to ensure tasks are always
	 * processed in the order in which they were dispatched.
	 *
	 * A taskq per pool allows one to keep the pools independent.
	 * This way if one pool is suspended, it will not impact another.
	 *
	 * The preferred location to dispatch a zvol minor task is a sync
	 * task. In this context, there is easy access to the spa_t and minimal
	 * error handling is required because the sync task must succeed.
	 */
	spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
	    1, INT_MAX, 0);

	/*
	 * The taskq to upgrade datasets in this pool. Currently used by
	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING.
	 */
	spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t, q;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	spa_evicting_os_wait(spa);

	if (spa->spa_zvol_taskq) {
		taskq_destroy(spa->spa_zvol_taskq);
		spa->spa_zvol_taskq = NULL;
	}

	if (spa->spa_upgrade_taskq) {
		taskq_destroy(spa->spa_upgrade_taskq);
		spa->spa_upgrade_taskq = NULL;
	}

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);

	for (t = 0; t < ZIO_TYPES; t++) {
		for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;
	int c;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i, c;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Even though vdev_free() also calls vdev_metaslab_fini, we need
	 * to call it earlier, before we wait for async i/o to complete.
	 * This ensures that there is no async metaslab prefetching, by
	 * calling taskq_wait(mg_taskq).
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		for (c = 0; c < spa->spa_root_vdev->vdev_children; c++)
			vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	if (spa->spa_mmp.mmp_thread)
		mmp_thread_stop(spa);

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	if (sav->sav_config == NULL) {
		nl2cache = 0;
		newvdevs = NULL;
		goto out;
	}

	VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);

out:
	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error)
		return (error);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = vmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	vmem_free(packed, nvsize);

	return (error);
}

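/*
 * Editorial note (not upstream text): the on-disk layout load_nvlist()
 * expects is a DMU object whose bonus buffer holds the packed size as a
 * uint64_t and whose data blocks hold the packed nvlist bytes; the writer
 * side (spa_sync_nvlist(), elsewhere in spa.c and not shown in this
 * excerpt) lays the data down in that form.
 */
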
/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
	}
}

static void
spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
{
	uint64_t i;

	ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);

	vd->vdev_top_zap = mvd->vdev_top_zap;
	vd->vdev_leaf_zap = mvd->vdev_leaf_zap;

	for (i = 0; i < vd->vdev_children; i++) {
		spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
	}
}

9babb374 1743/*
572e2857 1744 * Validate the current config against the MOS config
9babb374 1745 */
572e2857
BB
1746static boolean_t
1747spa_config_valid(spa_t *spa, nvlist_t *config)
9babb374 1748{
572e2857
BB
1749 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1750 nvlist_t *nv;
d6320ddb 1751 int c, i;
572e2857
BB
1752
1753 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1754
1755 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1756 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1757
1758 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
9babb374 1759
428870ff 1760 /*
572e2857
BB
1761 * If we're doing a normal import, then build up any additional
1762 * diagnostic information about missing devices in this config.
1763 * We'll pass this up to the user for further processing.
428870ff 1764 */
572e2857
BB
1765 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1766 nvlist_t **child, *nv;
1767 uint64_t idx = 0;
1768
cae5b340 1769 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
ea04106b
AX
1770 KM_SLEEP);
1771 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
572e2857 1772
d6320ddb 1773 for (c = 0; c < rvd->vdev_children; c++) {
572e2857
BB
1774 vdev_t *tvd = rvd->vdev_child[c];
1775 vdev_t *mtvd = mrvd->vdev_child[c];
1776
1777 if (tvd->vdev_ops == &vdev_missing_ops &&
1778 mtvd->vdev_ops != &vdev_missing_ops &&
1779 mtvd->vdev_islog)
1780 child[idx++] = vdev_config_generate(spa, mtvd,
1781 B_FALSE, 0);
1782 }
9babb374 1783
1784 if (idx) {
1785 VERIFY(nvlist_add_nvlist_array(nv,
1786 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1787 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1788 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1789
d6320ddb 1790 for (i = 0; i < idx; i++)
1791 nvlist_free(child[i]);
1792 }
1793 nvlist_free(nv);
1794 kmem_free(child, rvd->vdev_children * sizeof (char **));
1795 }
1796
1797 /*
1798 * Compare the root vdev tree with the information we have
1799 * from the MOS config (mrvd). Check each top-level vdev
1800 * against the corresponding MOS config top-level vdev (mtvd).
1801 */
d6320ddb 1802 for (c = 0; c < rvd->vdev_children; c++) {
1803 vdev_t *tvd = rvd->vdev_child[c];
1804 vdev_t *mtvd = mrvd->vdev_child[c];
1805
1806 /*
1807 * Resolve any "missing" vdevs in the current configuration.
1808 * If we find that the MOS config has more accurate information
1809 * about the top-level vdev, use that vdev instead.
1810 */
1811 if (tvd->vdev_ops == &vdev_missing_ops &&
1812 mtvd->vdev_ops != &vdev_missing_ops) {
1813
1814 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1815 continue;
1816
1817 /*
1818 * Device specific actions.
1819 */
1820 if (mtvd->vdev_islog) {
1821 spa_set_log_state(spa, SPA_LOG_CLEAR);
1822 } else {
1823 /*
1824 * XXX - once we have 'readonly' pool
1825 * support we should be able to handle
1826 * missing data devices by transitioning
1827 * the pool to readonly.
1828 */
1829 continue;
1830 }
1831
1832 /*
1833 * Swap the missing vdev with the data we were
1834 * able to obtain from the MOS config.
1835 */
1836 vdev_remove_child(rvd, tvd);
1837 vdev_remove_child(mrvd, mtvd);
1838
1839 vdev_add_child(rvd, mtvd);
1840 vdev_add_child(mrvd, tvd);
1841
1842 spa_config_exit(spa, SCL_ALL, FTAG);
1843 vdev_load(mtvd);
1844 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1845
1846 vdev_reopen(rvd);
1847 } else {
1848 if (mtvd->vdev_islog) {
1849 /*
1850 * Load the slog device's state from the MOS
1851 * config since it's possible that the label
1852 * does not contain the most up-to-date
1853 * information.
1854 */
1855 vdev_load_log_state(tvd, mtvd);
1856 vdev_reopen(tvd);
1857 }
1858
572e2857 1859 /*
cae5b340 1860 * Per-vdev ZAP info is stored exclusively in the MOS.
572e2857 1861 */
cae5b340 1862 spa_config_valid_zaps(tvd, mtvd);
572e2857 1863 }
9babb374 1864 }
cae5b340 1865
572e2857 1866 vdev_free(mrvd);
428870ff 1867 spa_config_exit(spa, SCL_ALL, FTAG);
1868
1869 /*
1870 * Ensure we were able to validate the config.
1871 */
1872 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1873}
1874
1875/*
1876 * Check for missing log devices
1877 */
a08ee875 1878static boolean_t
1879spa_check_logs(spa_t *spa)
1880{
a08ee875 1881 boolean_t rv = B_FALSE;
e10b0808 1882 dsl_pool_t *dp = spa_get_dsl(spa);
a08ee875 1883
b128c09f 1884 switch (spa->spa_log_state) {
1885 default:
1886 break;
1887 case SPA_LOG_MISSING:
1888 /* need to recheck in case slog has been restored */
1889 case SPA_LOG_UNKNOWN:
1890 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1891 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
a08ee875 1892 if (rv)
428870ff 1893 spa_set_log_state(spa, SPA_LOG_MISSING);
b128c09f 1894 break;
b128c09f 1895 }
a08ee875 1896 return (rv);
1897}
1898
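/*
 * Passivate the metaslab group of every top-level log (slog) vdev so that
 * no new allocations are directed at the logs; returns B_TRUE if at least
 * one slog was found.
 */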
1899static boolean_t
1900spa_passivate_log(spa_t *spa)
34dc7c2f 1901{
1902 vdev_t *rvd = spa->spa_root_vdev;
1903 boolean_t slog_found = B_FALSE;
d6320ddb 1904 int c;
b128c09f 1905
428870ff 1906 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
fb5f0bc8 1907
1908 if (!spa_has_slogs(spa))
1909 return (B_FALSE);
34dc7c2f 1910
d6320ddb 1911 for (c = 0; c < rvd->vdev_children; c++) {
1912 vdev_t *tvd = rvd->vdev_child[c];
1913 metaslab_group_t *mg = tvd->vdev_mg;
34dc7c2f 1914
1915 if (tvd->vdev_islog) {
1916 metaslab_group_passivate(mg);
1917 slog_found = B_TRUE;
1918 }
1919 }
1920
1921 return (slog_found);
1922}
34dc7c2f 1923
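/*
 * Re-activate the metaslab group of every top-level log vdev, undoing
 * spa_passivate_log().
 */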
1924static void
1925spa_activate_log(spa_t *spa)
1926{
1927 vdev_t *rvd = spa->spa_root_vdev;
d6320ddb 1928 int c;
34dc7c2f 1929
1930 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1931
d6320ddb 1932 for (c = 0; c < rvd->vdev_children; c++) {
1933 vdev_t *tvd = rvd->vdev_child[c];
1934 metaslab_group_t *mg = tvd->vdev_mg;
1935
1936 if (tvd->vdev_islog)
1937 metaslab_group_activate(mg);
34dc7c2f 1938 }
428870ff 1939}
34dc7c2f 1940
1941int
1942spa_offline_log(spa_t *spa)
1943{
a08ee875 1944 int error;
9babb374 1945
1946 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1947 NULL, DS_FIND_CHILDREN);
1948 if (error == 0) {
1949 /*
1950 * We successfully offlined the log device, sync out the
1951 * current txg so that the "stubby" block can be removed
1952 * by zil_sync().
1953 */
1954 txg_wait_synced(spa->spa_dsl_pool, 0);
1955 }
1956 return (error);
1957}
34dc7c2f 1958
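/* Run the removal check above over every aux (spare or l2cache) vdev. */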
1959static void
1960spa_aux_check_removed(spa_aux_vdev_t *sav)
1961{
1962 int i;
1963
1964 for (i = 0; i < sav->sav_count; i++)
1965 spa_check_removed(sav->sav_vdevs[i]);
1966}
34dc7c2f 1967
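/*
 * Called from the ZIL-claim zio's done path: track the birth txg of the
 * youngest block claimed so far, so spa_load() knows how far it must sync.
 */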
1968void
1969spa_claim_notify(zio_t *zio)
1970{
1971 spa_t *spa = zio->io_spa;
34dc7c2f 1972
1973 if (zio->io_error)
1974 return;
34dc7c2f 1975
1976 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1977 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1978 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1979 mutex_exit(&spa->spa_props_lock);
1980}
34dc7c2f 1981
1982typedef struct spa_load_error {
1983 uint64_t sle_meta_count;
1984 uint64_t sle_data_count;
1985} spa_load_error_t;
34dc7c2f 1986
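/*
 * Completion callback for the reads issued by spa_load_verify_cb(): tally
 * metadata vs. data errors and release one slot of the scrub-inflight
 * throttle.
 */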
1987static void
1988spa_load_verify_done(zio_t *zio)
1989{
1990 blkptr_t *bp = zio->io_bp;
1991 spa_load_error_t *sle = zio->io_private;
1992 dmu_object_type_t type = BP_GET_TYPE(bp);
1993 int error = zio->io_error;
ea04106b 1994 spa_t *spa = zio->io_spa;
34dc7c2f 1995
cae5b340 1996 abd_free(zio->io_abd);
428870ff 1997 if (error) {
9ae529ec 1998 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
428870ff 1999 type != DMU_OT_INTENT_LOG)
cae5b340 2000 atomic_inc_64(&sle->sle_meta_count);
428870ff 2001 else
cae5b340 2002 atomic_inc_64(&sle->sle_data_count);
34dc7c2f 2003 }
2004
2005 mutex_enter(&spa->spa_scrub_lock);
2006 spa->spa_scrub_inflight--;
2007 cv_broadcast(&spa->spa_scrub_io_cv);
2008 mutex_exit(&spa->spa_scrub_lock);
428870ff 2009}
34dc7c2f 2010
2011/*
2012 * Maximum number of concurrent scrub i/os to create while verifying
2013 * a pool during import.
2014 */
2015int spa_load_verify_maxinflight = 10000;
2016int spa_load_verify_metadata = B_TRUE;
2017int spa_load_verify_data = B_TRUE;
2018
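/*
 * The three variables above are tunables; on Linux builds they are
 * typically exposed as module parameters, so (illustrative example only)
 * a command such as:
 *
 *   echo 0 > /sys/module/zfs/parameters/spa_load_verify_data
 *
 * would skip data-block verification on subsequent imports.
 */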
2019/*ARGSUSED*/
2020static int
2021spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
ea04106b 2022 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
428870ff 2023{
2024 zio_t *rio;
2025 size_t size;
34dc7c2f 2026
cae5b340 2027 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
2028 return (0);
2029 /*
2030 * Note: normally this routine will not be called if
2031 * spa_load_verify_metadata is not set. However, the flag may be
2032 * cleared after the traversal has begun, so it is rechecked here.
2033 */
2034 if (!spa_load_verify_metadata)
2035 return (0);
cae5b340 2036 if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
2037 return (0);
2038
2039 rio = arg;
2040 size = BP_GET_PSIZE(bp);
2041
2042 mutex_enter(&spa->spa_scrub_lock);
2043 while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
2044 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2045 spa->spa_scrub_inflight++;
2046 mutex_exit(&spa->spa_scrub_lock);
2047
cae5b340 2048 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2049 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2050 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2051 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2052 return (0);
2053}
34dc7c2f 2054
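/*
 * dmu_objset_find_dp() callback used by spa_load_verify(): fail the import
 * early with ENAMETOOLONG if any dataset name in the pool would overflow
 * ZFS_MAX_DATASET_NAME_LEN.
 */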
2055/* ARGSUSED */
2056int
2057verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2058{
2059 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2060 return (SET_ERROR(ENAMETOOLONG));
2061
2062 return (0);
2063}
2064
2065static int
2066spa_load_verify(spa_t *spa)
2067{
2068 zio_t *rio;
2069 spa_load_error_t sle = { 0 };
2070 zpool_rewind_policy_t policy;
2071 boolean_t verify_ok = B_FALSE;
ea04106b 2072 int error = 0;
34dc7c2f 2073
428870ff 2074 zpool_get_rewind_policy(spa->spa_config, &policy);
34dc7c2f 2075
2076 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
2077 return (0);
34dc7c2f 2078
2079 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2080 error = dmu_objset_find_dp(spa->spa_dsl_pool,
2081 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2082 DS_FIND_CHILDREN);
2083 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2084 if (error != 0)
2085 return (error);
2086
2087 rio = zio_root(spa, NULL, &sle,
2088 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
34dc7c2f 2089
2090 if (spa_load_verify_metadata) {
2091 error = traverse_pool(spa, spa->spa_verify_min_txg,
2092 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2093 spa_load_verify_cb, rio);
2094 }
2095
2096 (void) zio_wait(rio);
2097
2098 spa->spa_load_meta_errors = sle.sle_meta_count;
2099 spa->spa_load_data_errors = sle.sle_data_count;
2100
2101 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
2102 sle.sle_data_count <= policy.zrp_maxdata) {
2103 int64_t loss = 0;
2104
2105 verify_ok = B_TRUE;
2106 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2107 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2108
2109 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2110 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2111 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2112 VERIFY(nvlist_add_int64(spa->spa_load_info,
2113 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2114 VERIFY(nvlist_add_uint64(spa->spa_load_info,
2115 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2116 } else {
2117 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2118 }
2119
2120 if (error) {
2121 if (error != ENXIO && error != EIO)
a08ee875 2122 error = SET_ERROR(EIO);
2123 return (error);
2124 }
2125
2126 return (verify_ok ? 0 : EIO);
2127}
2128
2129/*
2130 * Find a value in the pool props object.
2131 */
2132static void
2133spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2134{
2135 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2136 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2137}
2138
2139/*
2140 * Find a value in the pool directory object.
2141 */
2142static int
2143spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
2144{
2145 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2146 name, sizeof (uint64_t), 1, val));
2147}
2148
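/*
 * Convenience helper: mark the given vdev (usually the root vdev) as
 * CANT_OPEN with the supplied aux reason, then hand the error back so
 * callers can 'return (spa_vdev_err(...))' in a single step.
 */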
2149static int
2150spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2151{
2152 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2153 return (err);
2154}
2155
2156/*
2157 * Fix up config after a partly-completed split. This is done with the
2158 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
2159 * pool have that entry in their config, but only the splitting one contains
2160 * a list of all the guids of the vdevs that are being split off.
2161 *
2162 * This function determines what to do with that list: either rejoin
2163 * all the disks to the pool, or complete the splitting process. To attempt
2164 * the rejoin, each disk that is offlined is marked online again, and
2165 * we do a reopen() call. If the vdev label for every disk that was
2166 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2167 * then we call vdev_split() on each disk, and complete the split.
2168 *
2169 * Otherwise we leave the config alone, with all the vdevs in place in
2170 * the original pool.
2171 */
2172static void
2173spa_try_repair(spa_t *spa, nvlist_t *config)
2174{
2175 uint_t extracted;
2176 uint64_t *glist;
2177 uint_t i, gcount;
2178 nvlist_t *nvl;
2179 vdev_t **vd;
2180 boolean_t attempt_reopen;
2181
2182 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2183 return;
2184
2185 /* check that the config is complete */
2186 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2187 &glist, &gcount) != 0)
2188 return;
2189
ea04106b 2190 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2191
2192 /* attempt to online all the vdevs & validate */
2193 attempt_reopen = B_TRUE;
2194 for (i = 0; i < gcount; i++) {
2195 if (glist[i] == 0) /* vdev is hole */
2196 continue;
2197
2198 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2199 if (vd[i] == NULL) {
2200 /*
2201 * Don't bother attempting to reopen the disks;
2202 * just do the split.
2203 */
2204 attempt_reopen = B_FALSE;
2205 } else {
2206 /* attempt to re-online it */
2207 vd[i]->vdev_offline = B_FALSE;
2208 }
2209 }
2210
2211 if (attempt_reopen) {
2212 vdev_reopen(spa->spa_root_vdev);
2213
2214 /* check each device to see what state it's in */
2215 for (extracted = 0, i = 0; i < gcount; i++) {
2216 if (vd[i] != NULL &&
2217 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2218 break;
2219 ++extracted;
2220 }
2221 }
2222
2223 /*
2224 * If every disk has been moved to the new pool, or if we never
2225 * even attempted to look at them, then we split them off for
2226 * good.
2227 */
2228 if (!attempt_reopen || gcount == extracted) {
2229 for (i = 0; i < gcount; i++)
2230 if (vd[i] != NULL)
2231 vdev_split(vd[i]);
2232 vdev_reopen(spa->spa_root_vdev);
2233 }
2234
2235 kmem_free(vd, gcount * sizeof (vdev_t *));
2236}
2237
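/*
 * Outer entry point for loading a pool: record the pool's comment and
 * config guid, delegate the real work to spa_load_impl(), and post an FMA
 * ereport if the load fails with anything other than EBADF (which means a
 * vdev label says the pool was exported or destroyed).
 */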
2238static int
2239spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2240 boolean_t mosconfig)
2241{
2242 nvlist_t *config = spa->spa_config;
2243 char *ereport = FM_EREPORT_ZFS_POOL;
d96eb2b1 2244 char *comment;
2245 int error;
2246 uint64_t pool_guid;
2247 nvlist_t *nvl;
2248
2249 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
a08ee875 2250 return (SET_ERROR(EINVAL));
428870ff 2251
2252 ASSERT(spa->spa_comment == NULL);
2253 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2254 spa->spa_comment = spa_strdup(comment);
2255
2256 /*
2257 * Versioning wasn't explicitly added to the label until later, so if
2258 * it's not present, treat it as the initial version.
2259 */
2260 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2261 &spa->spa_ubsync.ub_version) != 0)
2262 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2263
2264 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2265 &spa->spa_config_txg);
2266
2267 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2268 spa_guid_exists(pool_guid, 0)) {
a08ee875 2269 error = SET_ERROR(EEXIST);
428870ff 2270 } else {
3541dc6d 2271 spa->spa_config_guid = pool_guid;
2272
2273 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2274 &nvl) == 0) {
2275 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
ea04106b 2276 KM_SLEEP) == 0);
2277 }
2278
2279 nvlist_free(spa->spa_load_info);
2280 spa->spa_load_info = fnvlist_alloc();
2281
572e2857 2282 gethrestime(&spa->spa_loaded_ts);
2283 error = spa_load_impl(spa, pool_guid, config, state, type,
2284 mosconfig, &ereport);
2285 }
2286
2287 /*
2288 * Don't count references from objsets that are already closed
2289 * and are making their way through the eviction process.
2290 */
2291 spa_evicting_os_wait(spa);
428870ff 2292 spa->spa_minref = refcount_count(&spa->spa_refcount);
2293 if (error) {
2294 if (error != EEXIST) {
2295 spa->spa_loaded_ts.tv_sec = 0;
2296 spa->spa_loaded_ts.tv_nsec = 0;
2297 }
2298 if (error != EBADF) {
2299 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2300 }
2301 }
2302 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2303 spa->spa_ena = 0;
2304
2305 return (error);
2306}
2307
2308#ifdef ZFS_DEBUG
2309/*
2310 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2311 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2312 * spa's per-vdev ZAP list.
2313 */
2314static uint64_t
2315vdev_count_verify_zaps(vdev_t *vd)
2316{
2317 spa_t *spa = vd->vdev_spa;
2318 uint64_t total = 0;
2319 uint64_t i;
2320
2321 if (vd->vdev_top_zap != 0) {
2322 total++;
2323 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2324 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2325 }
2326 if (vd->vdev_leaf_zap != 0) {
2327 total++;
2328 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2329 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2330 }
2331
2332 for (i = 0; i < vd->vdev_children; i++) {
2333 total += vdev_count_verify_zaps(vd->vdev_child[i]);
2334 }
2335
2336 return (total);
2337}
2338#endif
2339
2340/*
2341 * Determine whether the activity check is required.
2342 */
2343static boolean_t
2344spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
2345 nvlist_t *config)
2346{
2347 uint64_t state = 0;
2348 uint64_t hostid = 0;
2349 uint64_t tryconfig_txg = 0;
2350 uint64_t tryconfig_timestamp = 0;
2351 nvlist_t *nvinfo;
2352
2353 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
2354 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2355 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
2356 &tryconfig_txg);
2357 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2358 &tryconfig_timestamp);
2359 }
2360
2361 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
2362
2363 /*
2364 * Disable the MMP activity check - This is used by zdb which
2365 * is intended to be used on potentially active pools.
2366 */
2367 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
2368 return (B_FALSE);
2369
2370 /*
2371 * Skip the activity check when the MMP feature is disabled.
2372 */
2373 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
2374 return (B_FALSE);
2375 /*
2376 * If the tryconfig_* values are nonzero, they are the results of an
2377 * earlier tryimport. If they match the uberblock we just found, then
2378 * the pool has not changed and we return false so we do not test a
2379 * second time.
2380 */
2381 if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
2382 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp)
2383 return (B_FALSE);
2384
2385 /*
2386 * Allow the activity check to be skipped when importing the pool
2387 * on the same host which last imported it. Since the hostid from
2388 * the configuration may be stale, use the one read from the label.
cae5b340 2389 */
2390 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
2391 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
2392
2393 if (hostid == spa_get_hostid())
2394 return (B_FALSE);
2395
2396 /*
2397 * Skip the activity test when the pool was cleanly exported.
2398 */
2399 if (state != POOL_STATE_ACTIVE)
2400 return (B_FALSE);
2401
2402 return (B_TRUE);
2403}
2404
2405/*
2406 * Perform the import activity check. If the user canceled the import or
2407 * we detected activity, then fail.
2408 */
2409static int
2410spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
2411{
2412 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
2413 uint64_t txg = ub->ub_txg;
2414 uint64_t timestamp = ub->ub_timestamp;
2415 uint64_t import_delay = NANOSEC;
2416 hrtime_t import_expire;
2417 nvlist_t *mmp_label = NULL;
2418 vdev_t *rvd = spa->spa_root_vdev;
2419 kcondvar_t cv;
2420 kmutex_t mtx;
2421 int error = 0;
2422
2423 cv_init(&cv, NULL, CV_DEFAULT, NULL);
2424 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
2425 mutex_enter(&mtx);
2426
2427 /*
2428 * If ZPOOL_CONFIG_MMP_TXG is present, an activity check was performed
2429 * during the earlier tryimport. If the txg recorded there is 0 then
2430 * the pool is known to be active on another host.
2431 *
2432 * Otherwise, the pool might be in use on another node. Check for
2433 * changes in the uberblocks on disk if necessary.
2434 */
2435 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
2436 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
2437 ZPOOL_CONFIG_LOAD_INFO);
2438
2439 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
2440 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
2441 vdev_uberblock_load(rvd, ub, &mmp_label);
2442 error = SET_ERROR(EREMOTEIO);
2443 goto out;
2444 }
2445 }
2446
2447 /*
2448 * Preferentially use the zfs_multihost_interval from the node which
2449 * last imported the pool. This value is stored in an MMP uberblock as:
2450 *
2451 * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval
2452 */
2453 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay)
2454 import_delay = MAX(import_delay, import_intervals *
2455 ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1));
2456
2457 /* Apply a floor using the local default values. */
2458 import_delay = MAX(import_delay, import_intervals *
2459 MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));
2460
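/*
 * Worked example (illustrative values only): a pool of 10 leaf vdevs last
 * imported with zfs_multihost_interval = 1000ms writes each leaf roughly
 * every 100ms, so ub_mmp_delay ~= 100ms. With import_intervals = 10 the
 * delay computed above is 10 * 100ms * 10 leaves = 10 seconds, before the
 * random factor below is added.
 */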
2461 /* Add a small random factor in case of simultaneous imports (0-25%) */
2462 import_expire = gethrtime() + import_delay +
2463 (import_delay * spa_get_random(250) / 1000);
2464
2465 while (gethrtime() < import_expire) {
2466 vdev_uberblock_load(rvd, ub, &mmp_label);
2467
2468 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) {
2469 error = SET_ERROR(EREMOTEIO);
2470 break;
2471 }
2472
2473 if (mmp_label) {
2474 nvlist_free(mmp_label);
2475 mmp_label = NULL;
2476 }
2477
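/*
 * Sleep for roughly one second. Nothing ever signals this private cv, so
 * cv_timedwait_sig() returning anything other than -1 (timeout) means the
 * wait was interrupted by a signal; treat that as a canceled import.
 */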
2478 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
2479 if (error != -1) {
2480 error = SET_ERROR(EINTR);
2481 break;
2482 }
2483 error = 0;
2484 }
2485
2486out:
2487 mutex_exit(&mtx);
2488 mutex_destroy(&mtx);
2489 cv_destroy(&cv);
2490
2491 /*
2492 * If the pool is determined to be active store the status in the
2493 * spa->spa_load_info nvlist. If the remote hostname or hostid are
2494 * available from configuration read from disk store them as well.
2495 * This allows 'zpool import' to generate a more useful message.
2496 *
2497 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
2498 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
2499 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
2500 */
2501 if (error == EREMOTEIO) {
2502 char *hostname = "<unknown>";
2503 uint64_t hostid = 0;
2504
2505 if (mmp_label) {
2506 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
2507 hostname = fnvlist_lookup_string(mmp_label,
2508 ZPOOL_CONFIG_HOSTNAME);
2509 fnvlist_add_string(spa->spa_load_info,
2510 ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
2511 }
2512
2513 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
2514 hostid = fnvlist_lookup_uint64(mmp_label,
2515 ZPOOL_CONFIG_HOSTID);
2516 fnvlist_add_uint64(spa->spa_load_info,
2517 ZPOOL_CONFIG_MMP_HOSTID, hostid);
2518 }
2519 }
2520
2521 fnvlist_add_uint64(spa->spa_load_info,
2522 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
2523 fnvlist_add_uint64(spa->spa_load_info,
2524 ZPOOL_CONFIG_MMP_TXG, 0);
2525
2526 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
2527 }
2528
2529 if (mmp_label)
2530 nvlist_free(mmp_label);
2531
2532 return (error);
2533}
2534
2535/*
2536 * Load an existing storage pool, using the pool's builtin spa_config as a
2537 * source of configuration information.
2538 */
2539__attribute__((always_inline))
2540static inline int
2541spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2542 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2543 char **ereport)
2544{
2545 int error = 0;
2546 nvlist_t *nvroot = NULL;
9ae529ec 2547 nvlist_t *label;
2548 vdev_t *rvd;
2549 uberblock_t *ub = &spa->spa_uberblock;
572e2857 2550 uint64_t children, config_cache_txg = spa->spa_config_txg;
428870ff 2551 int orig_mode = spa->spa_mode;
ea04106b 2552 int parse, i;
428870ff 2553 uint64_t obj;
9ae529ec 2554 boolean_t missing_feat_write = B_FALSE;
2555 boolean_t activity_check = B_FALSE;
2556 nvlist_t *mos_config;
2557
2558 /*
2559 * If this is an untrusted config, access the pool in read-only mode.
2560 * This prevents things like resilvering recently removed devices.
2561 */
2562 if (!mosconfig)
2563 spa->spa_mode = FREAD;
2564
2565 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2566
2567 spa->spa_load_state = state;
2568
2569 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
a08ee875 2570 return (SET_ERROR(EINVAL));
2571
2572 parse = (type == SPA_IMPORT_EXISTING ?
2573 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2574
2575 /*
2576 * Create "The Godfather" zio to hold all async IOs
2577 */
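/*
 * Note: one root zio is allocated per CPU (max_ncpus entries), presumably
 * so that async I/Os issued concurrently on many CPUs do not all contend
 * on a single root zio.
 */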
2578 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2579 KM_SLEEP);
2580 for (i = 0; i < max_ncpus; i++) {
2581 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2582 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2583 ZIO_FLAG_GODFATHER);
2584 }
2585
2586 /*
2587 * Parse the configuration into a vdev tree. We explicitly set the
2588 * value that will be returned by spa_version() since parsing the
2589 * configuration requires knowing the version number.
2590 */
2591 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2592 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2593 spa_config_exit(spa, SCL_ALL, FTAG);
2594
2595 if (error != 0)
2596 return (error);
2597
2598 ASSERT(spa->spa_root_vdev == rvd);
2599 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2600 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2601
2602 if (type != SPA_IMPORT_ASSEMBLE) {
2603 ASSERT(spa_guid(spa) == pool_guid);
2604 }
2605
2606 /*
2607 * Try to open all vdevs, loading each label in the process.
2608 */
2609 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2610 error = vdev_open(rvd);
2611 spa_config_exit(spa, SCL_ALL, FTAG);
2612 if (error != 0)
2613 return (error);
2614
2615 /*
2616 * We need to validate the vdev labels against the configuration that
2617 * we have in hand, which is dependent on the setting of mosconfig. If
2618 * mosconfig is true then we're validating the vdev labels based on
2619 * that config. Otherwise, we're validating against the cached config
2620 * (zpool.cache) that was read when we loaded the zfs module, and then
2621 * later we will recursively call spa_load() and validate against
2622 * the vdev config.
2623 *
2624 * If we're assembling a new pool that's been split off from an
2625 * existing pool, the labels haven't yet been updated so we skip
2626 * validation for now.
2627 */
2628 if (type != SPA_IMPORT_ASSEMBLE) {
2629 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
c7f2d69d 2630 error = vdev_validate(rvd, mosconfig);
2631 spa_config_exit(spa, SCL_ALL, FTAG);
2632
2633 if (error != 0)
2634 return (error);
2635
2636 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
a08ee875 2637 return (SET_ERROR(ENXIO));
2638 }
2639
2640 /*
2641 * Find the best uberblock.
2642 */
9ae529ec 2643 vdev_uberblock_load(rvd, ub, &label);
2644
2645 /*
2646 * If we weren't able to find a single valid uberblock, return failure.
2647 */
2648 if (ub->ub_txg == 0) {
2649 nvlist_free(label);
428870ff 2650 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
9ae529ec 2651 }
428870ff 2652
2653 /*
2654 * For pools which have the multihost property on determine if the
2655 * pool is truly inactive and can be safely imported. Prevent
2656 * hosts which don't have a hostid set from importing the pool.
2657 */
6b763916 2658 activity_check = spa_activity_check_required(spa, ub, label, config);
2659 if (activity_check) {
2660 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
2661 spa_get_hostid() == 0) {
2662 nvlist_free(label);
2663 fnvlist_add_uint64(spa->spa_load_info,
2664 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
2665 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
2666 }
2667
2668 error = spa_activity_check(spa, ub, config);
2669 if (error) {
2670 nvlist_free(label);
2671 return (error);
2672 }
2673
2674 fnvlist_add_uint64(spa->spa_load_info,
2675 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
2676 fnvlist_add_uint64(spa->spa_load_info,
2677 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
2678 }
2679
428870ff 2680 /*
9ae529ec 2681 * If the pool has an unsupported version we can't open it.
428870ff 2682 */
2683 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2684 nvlist_free(label);
428870ff 2685 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2686 }
2687
2688 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2689 nvlist_t *features;
2690
2691 /*
2692 * If we weren't able to find what's necessary for reading the
2693 * MOS in the label, return failure.
2694 */
2695 if (label == NULL || nvlist_lookup_nvlist(label,
2696 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2697 nvlist_free(label);
2698 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2699 ENXIO));
2700 }
2701
2702 /*
2703 * Update our in-core representation with the definitive values
2704 * from the label.
2705 */
2706 nvlist_free(spa->spa_label_features);
2707 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2708 }
2709
2710 nvlist_free(label);
2711
2712 /*
2713 * Look through entries in the label nvlist's features_for_read. If
2714 * there is a feature listed there which we don't understand then we
2715 * cannot open a pool.
2716 */
2717 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2718 nvlist_t *unsup_feat;
2719 nvpair_t *nvp;
2720
2721 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2722 0);
2723
2724 for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
2725 nvp != NULL;
2726 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2727 if (!zfeature_is_supported(nvpair_name(nvp))) {
2728 VERIFY(nvlist_add_string(unsup_feat,
2729 nvpair_name(nvp), "") == 0);
2730 }
2731 }
2732
2733 if (!nvlist_empty(unsup_feat)) {
2734 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2735 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2736 nvlist_free(unsup_feat);
2737 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2738 ENOTSUP));
2739 }
2740
2741 nvlist_free(unsup_feat);
2742 }
2743
2744 /*
2745 * If the vdev guid sum doesn't match the uberblock, we have an
2746 * incomplete configuration. We first check to see if the pool
2747 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2748 * If it is, defer the vdev_guid_sum check till later so we
2749 * can handle missing vdevs.
428870ff 2750 */
2751 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2752 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2753 rvd->vdev_guid_sum != ub->ub_guid_sum)
2754 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2755
2756 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2757 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2758 spa_try_repair(spa, config);
2759 spa_config_exit(spa, SCL_ALL, FTAG);
2760 nvlist_free(spa->spa_config_splitting);
2761 spa->spa_config_splitting = NULL;
2762 }
2763
2764 /*
2765 * Initialize internal SPA structures.
2766 */
2767 spa->spa_state = POOL_STATE_ACTIVE;
2768 spa->spa_ubsync = spa->spa_uberblock;
2769 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2770 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2771 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2772 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2773 spa->spa_claim_max_txg = spa->spa_first_txg;
2774 spa->spa_prev_software_version = ub->ub_software_version;
2775
9ae529ec 2776 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2777 if (error)
2778 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2779 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2780
2781 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2782 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2783
2784 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2785 boolean_t missing_feat_read = B_FALSE;
b9b24bb4 2786 nvlist_t *unsup_feat, *enabled_feat;
ea04106b 2787 spa_feature_t i;
2788
2789 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2790 &spa->spa_feat_for_read_obj) != 0) {
2791 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2792 }
2793
2794 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2795 &spa->spa_feat_for_write_obj) != 0) {
2796 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2797 }
2798
2799 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2800 &spa->spa_feat_desc_obj) != 0) {
2801 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2802 }
2803
2804 enabled_feat = fnvlist_alloc();
2805 unsup_feat = fnvlist_alloc();
9ae529ec 2806
ea04106b 2807 if (!spa_features_check(spa, B_FALSE,
b9b24bb4 2808 unsup_feat, enabled_feat))
2809 missing_feat_read = B_TRUE;
2810
2811 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
ea04106b 2812 if (!spa_features_check(spa, B_TRUE,
b9b24bb4 2813 unsup_feat, enabled_feat)) {
9ae529ec 2814 missing_feat_write = B_TRUE;
b9b24bb4 2815 }
2816 }
2817
2818 fnvlist_add_nvlist(spa->spa_load_info,
2819 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2820
9ae529ec 2821 if (!nvlist_empty(unsup_feat)) {
2822 fnvlist_add_nvlist(spa->spa_load_info,
2823 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2824 }
2825
2826 fnvlist_free(enabled_feat);
2827 fnvlist_free(unsup_feat);
2828
2829 if (!missing_feat_read) {
2830 fnvlist_add_boolean(spa->spa_load_info,
2831 ZPOOL_CONFIG_CAN_RDONLY);
2832 }
2833
2834 /*
2835 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2836 * twofold: to determine whether the pool is available for
2837 * import in read-write mode and (if it is not) whether the
2838 * pool is available for import in read-only mode. If the pool
2839 * is available for import in read-write mode, it is displayed
2840 * as available in userland; if it is not available for import
2841 * in read-only mode, it is displayed as unavailable in
2842 * userland. If the pool is available for import in read-only
2843 * mode but not read-write mode, it is displayed as unavailable
2844 * in userland with a special note that the pool is actually
2845 * available for open in read-only mode.
2846 *
2847 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2848 * missing a feature for write, we must first determine whether
2849 * the pool can be opened read-only before returning to
2850 * userland in order to know whether to display the
2851 * abovementioned note.
2852 */
2853 if (missing_feat_read || (missing_feat_write &&
2854 spa_writeable(spa))) {
2855 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2856 ENOTSUP));
2857 }
2858
2859 /*
2860 * Load refcounts for ZFS features from disk into an in-memory
2861 * cache during SPA initialization.
2862 */
2863 for (i = 0; i < SPA_FEATURES; i++) {
2864 uint64_t refcount;
2865
2866 error = feature_get_refcount_from_disk(spa,
2867 &spa_feature_table[i], &refcount);
2868 if (error == 0) {
2869 spa->spa_feat_refcount_cache[i] = refcount;
2870 } else if (error == ENOTSUP) {
2871 spa->spa_feat_refcount_cache[i] =
2872 SPA_FEATURE_DISABLED;
2873 } else {
2874 return (spa_vdev_err(rvd,
2875 VDEV_AUX_CORRUPT_DATA, EIO));
2876 }
2877 }
2878 }
2879
2880 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2881 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2882 &spa->spa_feat_enabled_txg_obj) != 0)
2883 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2884 }
2885
2886 spa->spa_is_initializing = B_TRUE;
2887 error = dsl_pool_open(spa->spa_dsl_pool);
2888 spa->spa_is_initializing = B_FALSE;
2889 if (error != 0)
2890 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2891
2892 if (!mosconfig) {
2893 uint64_t hostid;
2894 nvlist_t *policy = NULL, *nvconfig;
2895
2896 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2897 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2898
2899 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
b128c09f 2900 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2901 char *hostname;
2902 unsigned long myhostid = 0;
2903
428870ff 2904 VERIFY(nvlist_lookup_string(nvconfig,
2905 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2906
2907 myhostid = spa_get_hostid();
2908 if (hostid && myhostid && hostid != myhostid) {
428870ff 2909 nvlist_free(nvconfig);
a08ee875 2910 return (SET_ERROR(EBADF));
2911 }
2912 }
2913 if (nvlist_lookup_nvlist(spa->spa_config,
2914 ZPOOL_REWIND_POLICY, &policy) == 0)
2915 VERIFY(nvlist_add_nvlist(nvconfig,
2916 ZPOOL_REWIND_POLICY, policy) == 0);
34dc7c2f 2917
428870ff 2918 spa_config_set(spa, nvconfig);
2919 spa_unload(spa);
2920 spa_deactivate(spa);
fb5f0bc8 2921 spa_activate(spa, orig_mode);
34dc7c2f 2922
428870ff 2923 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2924 }
2925
2926 /* Grab the checksum salt from the MOS. */
2927 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2928 DMU_POOL_CHECKSUM_SALT, 1,
2929 sizeof (spa->spa_cksum_salt.zcs_bytes),
2930 spa->spa_cksum_salt.zcs_bytes);
2931 if (error == ENOENT) {
2932 /* Generate a new salt for subsequent use */
2933 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
2934 sizeof (spa->spa_cksum_salt.zcs_bytes));
2935 } else if (error != 0) {
2936 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2937 }
2938
2939 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2940 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2941 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2942 if (error != 0)
2943 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2944
2945 /*
2946 * Load the bit that tells us to use the new accounting function
2947 * (raid-z deflation). If we have an older pool, this will not
2948 * be present.
2949 */
2950 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2951 if (error != 0 && error != ENOENT)
2952 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2953
2954 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2955 &spa->spa_creation_version);
2956 if (error != 0 && error != ENOENT)
2957 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2958
2959 /*
2960 * Load the persistent error log. If we have an older pool, this will
2961 * not be present.
2962 */
2963 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2964 if (error != 0 && error != ENOENT)
2965 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2966
2967 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2968 &spa->spa_errlog_scrub);
2969 if (error != 0 && error != ENOENT)
2970 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2971
2972 /*
2973 * Load the history object. If we have an older pool, this
2974 * will not be present.
2975 */
2976 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2977 if (error != 0 && error != ENOENT)
2978 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2979
2980 /*
2981 * Load the per-vdev ZAP map. If we have an older pool, this will not
2982 * be present; in this case, defer its creation to a later time to
2983 * avoid dirtying the MOS this early / out of sync context. See
2984 * spa_sync_config_object.
2985 */
2986
2987 /* The sentinel is only available in the MOS config. */
2988 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
2989 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2990
2991 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
2992 &spa->spa_all_vdev_zaps);
2993
2994 if (error == ENOENT) {
2995 VERIFY(!nvlist_exists(mos_config,
2996 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
2997 spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
2998 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
2999 } else if (error != 0) {
3000 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3001 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
3002 /*
3003 * An older version of ZFS overwrote the sentinel value, so
3004 * we have orphaned per-vdev ZAPs in the MOS. Defer their
3005 * destruction to later; see spa_sync_config_object.
3006 */
3007 spa->spa_avz_action = AVZ_ACTION_DESTROY;
3008 /*
3009 * We're assuming that no vdevs have had their ZAPs created
3010 * before this. Better be sure of it.
3011 */
3012 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
3013 }
3014 nvlist_free(mos_config);
3015
3016 /*
3017 * If we're assembling the pool from the split-off vdevs of
3018 * an existing pool, we don't want to attach the spares & cache
3019 * devices.
3020 */
3021
3022 /*
3023 * Load any hot spares for this pool.
3024 */
3025 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
3026 if (error != 0 && error != ENOENT)
3027 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3028 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
3029 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
3030 if (load_nvlist(spa, spa->spa_spares.sav_object,
3031 &spa->spa_spares.sav_config) != 0)
3032 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 3033
b128c09f 3034 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3035 spa_load_spares(spa);
b128c09f 3036 spa_config_exit(spa, SCL_ALL, FTAG);
3037 } else if (error == 0) {
3038 spa->spa_spares.sav_sync = B_TRUE;
3039 }
3040
3041 /*
3042 * Load any level 2 ARC devices for this pool.
3043 */
428870ff 3044 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
34dc7c2f 3045 &spa->spa_l2cache.sav_object);
3046 if (error != 0 && error != ENOENT)
3047 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3048 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
3049 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
3050 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
3051 &spa->spa_l2cache.sav_config) != 0)
3052 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 3053
b128c09f 3054 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3055 spa_load_l2cache(spa);
b128c09f 3056 spa_config_exit(spa, SCL_ALL, FTAG);
3057 } else if (error == 0) {
3058 spa->spa_l2cache.sav_sync = B_TRUE;
3059 }
3060
3061 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3062
3063 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
3064 if (error && error != ENOENT)
3065 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3066
3067 if (error == 0) {
ea04106b 3068 uint64_t autoreplace = 0;
3069
3070 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
3071 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
3072 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
3073 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
3074 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
cae5b340 3075 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
3076 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
3077 &spa->spa_dedup_ditto);
3078
3079 spa->spa_autoreplace = (autoreplace != 0);
3080 }
3081
3082 /*
3083 * If the 'multihost' property is set, then never allow a pool to
3084 * be imported when the system hostid is zero. The exception to
3085 * this rule is zdb which is always allowed to access pools.
3086 */
3087 if (spa_multihost(spa) && spa_get_hostid() == 0 &&
3088 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
3089 fnvlist_add_uint64(spa->spa_load_info,
3090 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
3091 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
3092 }
3093
3094 /*
3095 * If the 'autoreplace' property is set, then post a resource notifying
3096 * the ZFS DE that it should not issue any faults for unopenable
3097 * devices. We also iterate over the vdevs, and post a sysevent for any
3098 * unopenable vdevs so that the normal autoreplace handler can take
3099 * over.
3100 */
428870ff 3101 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
34dc7c2f 3102 spa_check_removed(spa->spa_root_vdev);
3103 /*
3104 * For the import case, this is done in spa_import(), because
3105 * at this point we're using the spare definitions from
3106 * the MOS config, not necessarily from the userland config.
3107 */
3108 if (state != SPA_LOAD_IMPORT) {
3109 spa_aux_check_removed(&spa->spa_spares);
3110 spa_aux_check_removed(&spa->spa_l2cache);
3111 }
3112 }
3113
3114 /*
3115 * Load the vdev state for all toplevel vdevs.
3116 */
3117 vdev_load(rvd);
3118
3119 /*
3120 * Propagate the leaf DTLs we just loaded all the way up the tree.
3121 */
b128c09f 3122 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3123 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
b128c09f 3124 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 3125
3126 /*
3127 * Load the DDTs (dedup tables).
3128 */
3129 error = ddt_load(spa);
3130 if (error != 0)
3131 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3132
3133 spa_update_dspace(spa);
3134
428870ff 3135 /*
3136 * Validate the config, using the MOS config to fill in any
3137 * information which might be missing. If we fail to validate
3138 * the config then declare the pool unfit for use. If we're
3139 * assembling a pool from a split, the log is not transferred
3140 * over.
3141 */
3142 if (type != SPA_IMPORT_ASSEMBLE) {
3143 nvlist_t *nvconfig;
3144
3145 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
3146 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3147
3148 if (!spa_config_valid(spa, nvconfig)) {
3149 nvlist_free(nvconfig);
3150 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
3151 ENXIO));
3152 }
3153 nvlist_free(nvconfig);
3154
572e2857 3155 /*
9ae529ec 3156 * Now that we've validated the config, check the state of the
3157 * root vdev. If it can't be opened, it indicates one or
3158 * more toplevel vdevs are faulted.
3159 */
3160 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
a08ee875 3161 return (SET_ERROR(ENXIO));
572e2857 3162
e10b0808 3163 if (spa_writeable(spa) && spa_check_logs(spa)) {
3164 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
3165 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
3166 }
3167 }
3168
3169 if (missing_feat_write) {
3170 ASSERT(state == SPA_LOAD_TRYIMPORT);
3171
3172 /*
3173 * At this point, we know that we can open the pool in
3174 * read-only mode but not read-write mode. We now have enough
3175 * information and can return to userland.
3176 */
3177 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
3178 }
3179
3180 /*
3181 * We've successfully opened the pool, verify that we're ready
3182 * to start pushing transactions.
3183 */
3184 if (state != SPA_LOAD_TRYIMPORT) {
c65aa5b2 3185 if ((error = spa_load_verify(spa)))
3186 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3187 error));
3188 }
3189
3190 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
3191 spa->spa_load_max_txg == UINT64_MAX)) {
3192 dmu_tx_t *tx;
3193 int need_update = B_FALSE;
e10b0808 3194 dsl_pool_t *dp = spa_get_dsl(spa);
d6320ddb 3195 int c;
3196
3197 ASSERT(state != SPA_LOAD_TRYIMPORT);
3198
3199 /*
3200 * Claim log blocks that haven't been committed yet.
3201 * This must all happen in a single txg.
3202 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
3203 * invoked from zil_claim_log_block()'s i/o done callback.
3204 * Price of rollback is that we abandon the log.
34dc7c2f 3205 */
3206 spa->spa_claiming = B_TRUE;
3207
3208 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
3209 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
3210 zil_claim, tx, DS_FIND_CHILDREN);
3211 dmu_tx_commit(tx);
3212
3213 spa->spa_claiming = B_FALSE;
3214
3215 spa_set_log_state(spa, SPA_LOG_GOOD);
3216 spa->spa_sync_on = B_TRUE;
3217 txg_sync_start(spa->spa_dsl_pool);
cae5b340 3218 mmp_thread_start(spa);
3219
3220 /*
3221 * Wait for all claims to sync. We sync up to the highest
3222 * claimed log block birth time so that claimed log blocks
3223 * don't appear to be from the future. spa_claim_max_txg
3224 * will have been set for us by either zil_check_log_chain()
3225 * (invoked from spa_check_logs()) or zil_claim() above.
34dc7c2f 3226 */
428870ff 3227 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
3228
3229 /*
3230 * If the config cache is stale, or we have uninitialized
3231 * metaslabs (see spa_vdev_add()), then update the config.
45d1cae3 3232 *
572e2857 3233 * If this is a verbatim import, trust the current
45d1cae3 3234 * in-core spa_config and update the disk labels.
3235 */
3236 if (config_cache_txg != spa->spa_config_txg ||
3237 state == SPA_LOAD_IMPORT ||
3238 state == SPA_LOAD_RECOVER ||
3239 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
3240 need_update = B_TRUE;
3241
d6320ddb 3242 for (c = 0; c < rvd->vdev_children; c++)
3243 if (rvd->vdev_child[c]->vdev_ms_array == 0)
3244 need_update = B_TRUE;
3245
3246 /*
3247 * Update the config cache asynchronously in case we're the
3248 * root pool, in which case the config cache isn't writable yet.
3249 */
3250 if (need_update)
3251 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3252
3253 /*
3254 * Check all DTLs to see if anything needs resilvering.
3255 */
3256 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
3257 vdev_resilver_needed(rvd, NULL, NULL))
fb5f0bc8 3258 spa_async_request(spa, SPA_ASYNC_RESILVER);
428870ff 3259
3260 /*
3261 * Log the fact that we booted up (so that we can detect if
3262 * we rebooted in the middle of an operation).
3263 */
41d74433 3264 spa_history_log_version(spa, "open", NULL);
a08ee875 3265
3266 /*
3267 * Delete any inconsistent datasets.
3268 */
3269 (void) dmu_objset_find(spa_name(spa),
3270 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
3271
3272 /*
3273 * Clean up any stale temporary dataset userrefs.
3274 */
3275 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
3276 }
3277
3278 return (0);
3279}
34dc7c2f 3280
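/*
 * Unload and reactivate the pool, then retry spa_load() with
 * spa_load_max_txg capped to one txg before the current uberblock; used
 * by spa_load_best() to step backwards through txgs while rewinding.
 */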
3281static int
3282spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
3283{
3284 int mode = spa->spa_mode;
3285
3286 spa_unload(spa);
3287 spa_deactivate(spa);
3288
ea04106b 3289 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
428870ff 3290
572e2857 3291 spa_activate(spa, mode);
3292 spa_async_suspend(spa);
3293
3294 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
3295}
3296
3297/*
3298 * If spa_load() fails, this function will try loading prior txgs. If
3299 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds, the pool
3300 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
3301 * function will not rewind the pool and will return the same error as
3302 * spa_load().
3303 */
3304static int
3305spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
3306 uint64_t max_request, int rewind_flags)
3307{
9ae529ec 3308 nvlist_t *loadinfo = NULL;
3309 nvlist_t *config = NULL;
3310 int load_error, rewind_error;
3311 uint64_t safe_rewind_txg;
3312 uint64_t min_txg;
3313
3314 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
3315 spa->spa_load_max_txg = spa->spa_load_txg;
3316 spa_set_log_state(spa, SPA_LOG_CLEAR);
3317 } else {
3318 spa->spa_load_max_txg = max_request;
3319 if (max_request != UINT64_MAX)
3320 spa->spa_extreme_rewind = B_TRUE;
3321 }
3322
3323 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
3324 mosconfig);
3325 if (load_error == 0)
3326 return (0);
3327
3328 if (spa->spa_root_vdev != NULL)
3329 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3330
3331 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
3332 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
3333
3334 if (rewind_flags & ZPOOL_NEVER_REWIND) {
3335 nvlist_free(config);
3336 return (load_error);
3337 }
3338
3339 if (state == SPA_LOAD_RECOVER) {
3340 /* Price of rolling back is discarding txgs, including log */
428870ff 3341 spa_set_log_state(spa, SPA_LOG_CLEAR);
3342 } else {
3343 /*
3344 * If we aren't rolling back save the load info from our first
3345 * import attempt so that we can restore it after attempting
3346 * to rewind.
3347 */
3348 loadinfo = spa->spa_load_info;
3349 spa->spa_load_info = fnvlist_alloc();
3350 }
3351
3352 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
3353 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
3354 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
3355 TXG_INITIAL : safe_rewind_txg;
3356
3357 /*
3358 * Continue as long as we're finding errors, we're still within
3359 * the acceptable rewind range, and we're still finding uberblocks
3360 */
3361 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
3362 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
3363 if (spa->spa_load_max_txg < safe_rewind_txg)
3364 spa->spa_extreme_rewind = B_TRUE;
3365 rewind_error = spa_load_retry(spa, state, mosconfig);
3366 }
3367
3368 spa->spa_extreme_rewind = B_FALSE;
3369 spa->spa_load_max_txg = UINT64_MAX;
3370
3371 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
3372 spa_config_set(spa, config);
3373 else
3374 nvlist_free(config);
428870ff 3375
3376 if (state == SPA_LOAD_RECOVER) {
3377 ASSERT3P(loadinfo, ==, NULL);
3378 return (rewind_error);
3379 } else {
3380 /* Store the rewind info as part of the initial load info */
3381 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
3382 spa->spa_load_info);
3383
3384 /* Restore the initial load info */
3385 fnvlist_free(spa->spa_load_info);
3386 spa->spa_load_info = loadinfo;
3387
3388 return (load_error);
3389 }
3390}
3391
3392/*
3393 * Pool Open/Import
3394 *
3395 * The import case is identical to an open except that the configuration is sent
3396 * down from userland, instead of grabbed from the configuration cache. For the
3397 * case of an open, the pool configuration will exist in the
3398 * POOL_STATE_UNINITIALIZED state.
3399 *
3400 * The stats information (gen/count/ustats) is used to gather vdev statistics at
3401 * the same time we open the pool, without having to keep around the spa_t in some
3402 * ambiguous state.
3403 */
3404static int
3405spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
3406 nvlist_t **config)
3407{
3408 spa_t *spa;
572e2857 3409 spa_load_state_t state = SPA_LOAD_OPEN;
34dc7c2f 3410 int error;
34dc7c2f 3411 int locked = B_FALSE;
c06d4368 3412 int firstopen = B_FALSE;
3413
3414 *spapp = NULL;
3415
3416 /*
3417 * As disgusting as this is, we need to support recursive calls to this
3418 * function because dsl_dir_open() is called during spa_load(), and ends
3419 * up calling spa_open() again. The real fix is to figure out how to
3420 * avoid dsl_dir_open() calling this in the first place.
3421 */
3422 if (mutex_owner(&spa_namespace_lock) != curthread) {
3423 mutex_enter(&spa_namespace_lock);
3424 locked = B_TRUE;
3425 }
3426
3427 if ((spa = spa_lookup(pool)) == NULL) {
3428 if (locked)
3429 mutex_exit(&spa_namespace_lock);
a08ee875 3430 return (SET_ERROR(ENOENT));
34dc7c2f 3431 }
428870ff 3432
34dc7c2f 3433 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
3434 zpool_rewind_policy_t policy;
3435
3436 firstopen = B_TRUE;
3437
3438 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
3439 &policy);
3440 if (policy.zrp_request & ZPOOL_DO_REWIND)
3441 state = SPA_LOAD_RECOVER;
34dc7c2f 3442
fb5f0bc8 3443 spa_activate(spa, spa_mode_global);
34dc7c2f 3444
3445 if (state != SPA_LOAD_RECOVER)
3446 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3447
3448 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
3449 policy.zrp_request);
3450
3451 if (error == EBADF) {
3452 /*
3453 * If vdev_validate() returns failure (indicated by
 3454 		 * EBADF), it means that one of the vdevs indicates
3455 * that the pool has been exported or destroyed. If
3456 * this is the case, the config cache is out of sync and
3457 * we should remove the pool from the namespace.
3458 */
3459 spa_unload(spa);
3460 spa_deactivate(spa);
b128c09f 3461 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 3462 spa_remove(spa);
3463 if (locked)
3464 mutex_exit(&spa_namespace_lock);
a08ee875 3465 return (SET_ERROR(ENOENT));
3466 }
3467
3468 if (error) {
3469 /*
3470 * We can't open the pool, but we still have useful
3471 * information: the state of each vdev after the
3472 * attempted vdev_open(). Return this to the user.
3473 */
572e2857 3474 if (config != NULL && spa->spa_config) {
428870ff 3475 VERIFY(nvlist_dup(spa->spa_config, config,
ea04106b 3476 KM_SLEEP) == 0);
3477 VERIFY(nvlist_add_nvlist(*config,
3478 ZPOOL_CONFIG_LOAD_INFO,
3479 spa->spa_load_info) == 0);
3480 }
3481 spa_unload(spa);
3482 spa_deactivate(spa);
428870ff 3483 spa->spa_last_open_failed = error;
3484 if (locked)
3485 mutex_exit(&spa_namespace_lock);
3486 *spapp = NULL;
3487 return (error);
34dc7c2f 3488 }
3489 }
3490
3491 spa_open_ref(spa, tag);
3492
b128c09f 3493 if (config != NULL)
34dc7c2f 3494 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
34dc7c2f 3495
3496 /*
3497 * If we've recovered the pool, pass back any information we
3498 * gathered while doing the load.
3499 */
3500 if (state == SPA_LOAD_RECOVER) {
3501 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3502 spa->spa_load_info) == 0);
3503 }
3504
3505 if (locked) {
3506 spa->spa_last_open_failed = 0;
3507 spa->spa_last_ubsync_txg = 0;
3508 spa->spa_load_txg = 0;
3509 mutex_exit(&spa_namespace_lock);
3510 }
3511
c06d4368 3512 if (firstopen)
4e820b5a 3513 zvol_create_minors(spa, spa_name(spa), B_TRUE);
c06d4368 3514
3515 *spapp = spa;
3516
3517 return (0);
3518}
3519
3520int
3521spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3522 nvlist_t **config)
3523{
3524 return (spa_open_common(name, spapp, tag, policy, config));
3525}
3526
3527int
3528spa_open(const char *name, spa_t **spapp, void *tag)
3529{
428870ff 3530 return (spa_open_common(name, spapp, tag, NULL, NULL));
3531}
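
For reference, every consumer of these entry points brackets its use of the spa_t between spa_open() and spa_close() with the same tag. A hypothetical caller, assuming the usual ZFS kernel headers (FTAG expands to the enclosing function's name):

/*
 * Hypothetical caller, not part of spa.c: open, inspect, close.
 * The tag passed to spa_close() must match the one given to spa_open().
 */
static int
example_probe_pool(const char *name)
{
	spa_t *spa;
	int error;

	if ((error = spa_open(name, &spa, FTAG)) != 0)
		return (error);		/* e.g. ENOENT if no such pool */

	/* ... the spa_t is safe to use while the open reference is held ... */

	spa_close(spa, FTAG);
	return (0);
}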
3532
3533/*
3534 * Lookup the given spa_t, incrementing the inject count in the process,
3535 * preventing it from being exported or destroyed.
3536 */
3537spa_t *
3538spa_inject_addref(char *name)
3539{
3540 spa_t *spa;
3541
3542 mutex_enter(&spa_namespace_lock);
3543 if ((spa = spa_lookup(name)) == NULL) {
3544 mutex_exit(&spa_namespace_lock);
3545 return (NULL);
3546 }
3547 spa->spa_inject_ref++;
3548 mutex_exit(&spa_namespace_lock);
3549
3550 return (spa);
3551}
3552
3553void
3554spa_inject_delref(spa_t *spa)
3555{
3556 mutex_enter(&spa_namespace_lock);
3557 spa->spa_inject_ref--;
3558 mutex_exit(&spa_namespace_lock);
3559}
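
A hypothetical sketch of how these are paired; the real consumer is the fault-injection ioctl path, which holds the reference for as long as a handler is registered:

/*
 * Hypothetical sketch, not part of spa.c: the pool cannot be exported
 * or destroyed while the inject reference is held.
 */
static int
example_with_inject_ref(char *pool)
{
	spa_t *spa;

	if ((spa = spa_inject_addref(pool)) == NULL)
		return (SET_ERROR(ENOENT));

	/* ... install or run a fault-injection handler against spa ... */

	spa_inject_delref(spa);
	return (0);
}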
3560
3561/*
3562 * Add spares device information to the nvlist.
3563 */
3564static void
3565spa_add_spares(spa_t *spa, nvlist_t *config)
3566{
3567 nvlist_t **spares;
3568 uint_t i, nspares;
3569 nvlist_t *nvroot;
3570 uint64_t guid;
3571 vdev_stat_t *vs;
3572 uint_t vsc;
3573 uint64_t pool;
3574
3575 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3576
3577 if (spa->spa_spares.sav_count == 0)
3578 return;
3579
3580 VERIFY(nvlist_lookup_nvlist(config,
3581 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3582 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3583 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3584 if (nspares != 0) {
3585 VERIFY(nvlist_add_nvlist_array(nvroot,
3586 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3587 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3588 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3589
3590 /*
3591 * Go through and find any spares which have since been
3592 * repurposed as an active spare. If this is the case, update
3593 * their status appropriately.
3594 */
3595 for (i = 0; i < nspares; i++) {
3596 VERIFY(nvlist_lookup_uint64(spares[i],
3597 ZPOOL_CONFIG_GUID, &guid) == 0);
3598 if (spa_spare_exists(guid, &pool, NULL) &&
3599 pool != 0ULL) {
34dc7c2f 3600 VERIFY(nvlist_lookup_uint64_array(
428870ff 3601 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3602 (uint64_t **)&vs, &vsc) == 0);
3603 vs->vs_state = VDEV_STATE_CANT_OPEN;
3604 vs->vs_aux = VDEV_AUX_SPARED;
3605 }
3606 }
3607 }
3608}
3609
3610/*
3611 * Add l2cache device information to the nvlist, including vdev stats.
3612 */
3613static void
3614spa_add_l2cache(spa_t *spa, nvlist_t *config)
3615{
3616 nvlist_t **l2cache;
3617 uint_t i, j, nl2cache;
3618 nvlist_t *nvroot;
3619 uint64_t guid;
3620 vdev_t *vd;
3621 vdev_stat_t *vs;
3622 uint_t vsc;
3623
3624 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3625
3626 if (spa->spa_l2cache.sav_count == 0)
3627 return;
3628
3629 VERIFY(nvlist_lookup_nvlist(config,
3630 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3631 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3632 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3633 if (nl2cache != 0) {
3634 VERIFY(nvlist_add_nvlist_array(nvroot,
3635 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3636 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3637 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3638
3639 /*
3640 * Update level 2 cache device stats.
3641 */
3642
3643 for (i = 0; i < nl2cache; i++) {
3644 VERIFY(nvlist_lookup_uint64(l2cache[i],
3645 ZPOOL_CONFIG_GUID, &guid) == 0);
3646
3647 vd = NULL;
3648 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3649 if (guid ==
3650 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3651 vd = spa->spa_l2cache.sav_vdevs[j];
3652 break;
3653 }
3654 }
3655 ASSERT(vd != NULL);
3656
3657 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3658 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3659 == 0);
34dc7c2f 3660 vdev_get_stats(vd, vs);
3661 vdev_config_generate_stats(vd, l2cache[i]);
3662
3663 }
3664 }
3665}
3666
9ae529ec 3667static void
ea04106b 3668spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
9ae529ec 3669{
3670 zap_cursor_t zc;
3671 zap_attribute_t za;
3672
3673 if (spa->spa_feat_for_read_obj != 0) {
3674 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3675 spa->spa_feat_for_read_obj);
3676 zap_cursor_retrieve(&zc, &za) == 0;
3677 zap_cursor_advance(&zc)) {
3678 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3679 za.za_num_integers == 1);
ea04106b 3680 VERIFY0(nvlist_add_uint64(features, za.za_name,
3681 za.za_first_integer));
3682 }
3683 zap_cursor_fini(&zc);
3684 }
3685
3686 if (spa->spa_feat_for_write_obj != 0) {
3687 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3688 spa->spa_feat_for_write_obj);
3689 zap_cursor_retrieve(&zc, &za) == 0;
3690 zap_cursor_advance(&zc)) {
3691 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3692 za.za_num_integers == 1);
ea04106b 3693 VERIFY0(nvlist_add_uint64(features, za.za_name,
3694 za.za_first_integer));
3695 }
3696 zap_cursor_fini(&zc);
3697 }
ea04106b 3698}
9ae529ec 3699
3700static void
3701spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
3702{
3703 int i;
3704
3705 for (i = 0; i < SPA_FEATURES; i++) {
3706 zfeature_info_t feature = spa_feature_table[i];
3707 uint64_t refcount;
3708
3709 if (feature_get_refcount(spa, &feature, &refcount) != 0)
3710 continue;
3711
3712 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
3713 }
3714}
3715
3716/*
3717 * Store a list of pool features and their reference counts in the
3718 * config.
3719 *
3720 * The first time this is called on a spa, allocate a new nvlist, fetch
3721 * the pool features and reference counts from disk, then save the list
3722 * in the spa. In subsequent calls on the same spa use the saved nvlist
3723 * and refresh its values from the cached reference counts. This
3724 * ensures we don't block here on I/O on a suspended pool so 'zpool
3725 * clear' can resume the pool.
3726 */
3727static void
3728spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3729{
3730 nvlist_t *features;
3731
3732 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3733
3734 mutex_enter(&spa->spa_feat_stats_lock);
3735 features = spa->spa_feat_stats;
3736
3737 if (features != NULL) {
3738 spa_feature_stats_from_cache(spa, features);
3739 } else {
3740 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
3741 spa->spa_feat_stats = features;
3742 spa_feature_stats_from_disk(spa, features);
3743 }
3744
3745 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3746 features));
3747
3748 mutex_exit(&spa->spa_feat_stats_lock);
3749}
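
The allocate-once-then-refresh idea above generalizes to any statistic that is expensive to read from disk but cheap to refresh from core. A stripped-down sketch of the same pattern (illustrative only; my_cache_t, fill_from_disk(), and refresh_from_core() are hypothetical names):

/*
 * Illustrative sketch, not part of spa.c: never block on pool I/O
 * after the first call.
 */
static void
example_cached_stats(my_cache_t *mc, nvlist_t *out)
{
	mutex_enter(&mc->mc_lock);
	if (mc->mc_nvl == NULL) {
		mc->mc_nvl = fnvlist_alloc();
		fill_from_disk(mc, mc->mc_nvl);		/* first call only */
	} else {
		refresh_from_core(mc, mc->mc_nvl);	/* cheap, in-core */
	}
	fnvlist_add_nvlist(out, "stats", mc->mc_nvl);
	mutex_exit(&mc->mc_lock);
}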
3750
34dc7c2f 3751int
3752spa_get_stats(const char *name, nvlist_t **config,
3753 char *altroot, size_t buflen)
3754{
3755 int error;
3756 spa_t *spa;
3757
3758 *config = NULL;
428870ff 3759 error = spa_open_common(name, &spa, FTAG, NULL, config);
34dc7c2f 3760
3761 if (spa != NULL) {
3762 /*
3763 * This still leaves a window of inconsistency where the spares
3764 * or l2cache devices could change and the config would be
3765 * self-inconsistent.
3766 */
3767 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f 3768
9babb374 3769 if (*config != NULL) {
3770 uint64_t loadtimes[2];
3771
3772 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3773 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3774 VERIFY(nvlist_add_uint64_array(*config,
3775 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3776
b128c09f 3777 VERIFY(nvlist_add_uint64(*config,
3778 ZPOOL_CONFIG_ERRCOUNT,
3779 spa_get_errlog_size(spa)) == 0);
3780
3781 if (spa_suspended(spa))
3782 VERIFY(nvlist_add_uint64(*config,
3783 ZPOOL_CONFIG_SUSPENDED,
3784 spa->spa_failmode) == 0);
b128c09f 3785
3786 spa_add_spares(spa, *config);
3787 spa_add_l2cache(spa, *config);
9ae529ec 3788 spa_add_feature_stats(spa, *config);
9babb374 3789 }
3790 }
3791
3792 /*
3793 * We want to get the alternate root even for faulted pools, so we cheat
3794 * and call spa_lookup() directly.
3795 */
3796 if (altroot) {
3797 if (spa == NULL) {
3798 mutex_enter(&spa_namespace_lock);
3799 spa = spa_lookup(name);
3800 if (spa)
3801 spa_altroot(spa, altroot, buflen);
3802 else
3803 altroot[0] = '\0';
3804 spa = NULL;
3805 mutex_exit(&spa_namespace_lock);
3806 } else {
3807 spa_altroot(spa, altroot, buflen);
3808 }
3809 }
3810
3811 if (spa != NULL) {
3812 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f 3813 spa_close(spa, FTAG);
9babb374 3814 }
3815
3816 return (error);
3817}
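
A hypothetical caller of spa_get_stats(); note that *config may be filled in even when the underlying open failed, since the partial vdev state is itself useful:

/*
 * Hypothetical caller, not part of spa.c.
 */
static int
example_pool_stats(const char *name)
{
	nvlist_t *config = NULL;
	char altroot[MAXPATHLEN];
	int error;

	error = spa_get_stats(name, &config, altroot, sizeof (altroot));
	if (config != NULL) {
		/* ... consume ZPOOL_CONFIG_ERRCOUNT, spares, l2cache ... */
		nvlist_free(config);
	}
	return (error);
}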
3818
3819/*
3820 * Validate that the auxiliary device array is well formed. We must have an
 3821  * array of nvlists, each of which describes a valid leaf vdev. If this is an
3822 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3823 * specified, as long as they are well-formed.
3824 */
3825static int
3826spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3827 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3828 vdev_labeltype_t label)
3829{
3830 nvlist_t **dev;
3831 uint_t i, ndev;
3832 vdev_t *vd;
3833 int error;
3834
3835 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3836
3837 /*
3838 * It's acceptable to have no devs specified.
3839 */
3840 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3841 return (0);
3842
3843 if (ndev == 0)
a08ee875 3844 return (SET_ERROR(EINVAL));
3845
3846 /*
3847 * Make sure the pool is formatted with a version that supports this
3848 * device type.
3849 */
3850 if (spa_version(spa) < version)
a08ee875 3851 return (SET_ERROR(ENOTSUP));
3852
3853 /*
3854 * Set the pending device list so we correctly handle device in-use
3855 * checking.
3856 */
3857 sav->sav_pending = dev;
3858 sav->sav_npending = ndev;
3859
3860 for (i = 0; i < ndev; i++) {
3861 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3862 mode)) != 0)
3863 goto out;
3864
3865 if (!vd->vdev_ops->vdev_op_leaf) {
3866 vdev_free(vd);
a08ee875 3867 error = SET_ERROR(EINVAL);
3868 goto out;
3869 }
3870
3871 vd->vdev_top = vd;
3872
3873 if ((error = vdev_open(vd)) == 0 &&
3874 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3875 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3876 vd->vdev_guid) == 0);
3877 }
3878
3879 vdev_free(vd);
3880
3881 if (error &&
3882 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3883 goto out;
3884 else
3885 error = 0;
3886 }
3887
3888out:
3889 sav->sav_pending = NULL;
3890 sav->sav_npending = 0;
3891 return (error);
3892}
3893
3894static int
3895spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3896{
3897 int error;
3898
3899 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3900
3901 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3902 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3903 VDEV_LABEL_SPARE)) != 0) {
3904 return (error);
3905 }
3906
3907 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3908 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3909 VDEV_LABEL_L2CACHE));
3910}
3911
3912static void
3913spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3914 const char *config)
3915{
3916 int i;
3917
3918 if (sav->sav_config != NULL) {
3919 nvlist_t **olddevs;
3920 uint_t oldndevs;
3921 nvlist_t **newdevs;
3922
3923 /*
cae5b340 3924 * Generate new dev list by concatenating with the
3925 * current dev list.
3926 */
3927 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3928 &olddevs, &oldndevs) == 0);
3929
3930 newdevs = kmem_alloc(sizeof (void *) *
ea04106b 3931 (ndevs + oldndevs), KM_SLEEP);
3932 for (i = 0; i < oldndevs; i++)
3933 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
ea04106b 3934 KM_SLEEP) == 0);
3935 for (i = 0; i < ndevs; i++)
3936 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
ea04106b 3937 KM_SLEEP) == 0);
3938
3939 VERIFY(nvlist_remove(sav->sav_config, config,
3940 DATA_TYPE_NVLIST_ARRAY) == 0);
3941
3942 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3943 config, newdevs, ndevs + oldndevs) == 0);
3944 for (i = 0; i < oldndevs + ndevs; i++)
3945 nvlist_free(newdevs[i]);
3946 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3947 } else {
3948 /*
3949 * Generate a new dev list.
3950 */
3951 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
ea04106b 3952 KM_SLEEP) == 0);
3953 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3954 devs, ndevs) == 0);
3955 }
3956}
3957
3958/*
3959 * Stop and drop level 2 ARC devices
3960 */
3961void
3962spa_l2cache_drop(spa_t *spa)
3963{
3964 vdev_t *vd;
3965 int i;
3966 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3967
3968 for (i = 0; i < sav->sav_count; i++) {
3969 uint64_t pool;
3970
3971 vd = sav->sav_vdevs[i];
3972 ASSERT(vd != NULL);
3973
3974 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3975 pool != 0ULL && l2arc_vdev_present(vd))
34dc7c2f 3976 l2arc_remove_vdev(vd);
3977 }
3978}
3979
3980/*
3981 * Pool Creation
3982 */
3983int
3984spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
a08ee875 3985 nvlist_t *zplprops)
3986{
3987 spa_t *spa;
3988 char *altroot = NULL;
3989 vdev_t *rvd;
3990 dsl_pool_t *dp;
3991 dmu_tx_t *tx;
9babb374 3992 int error = 0;
3993 uint64_t txg = TXG_INITIAL;
3994 nvlist_t **spares, **l2cache;
3995 uint_t nspares, nl2cache;
428870ff 3996 uint64_t version, obj;
3997 boolean_t has_features;
3998 nvpair_t *elem;
3999 int c, i;
4000 char *poolname;
4001 nvlist_t *nvl;
4002
4003 if (nvlist_lookup_string(props, "tname", &poolname) != 0)
4004 poolname = (char *)pool;
4005
4006 /*
4007 * If this pool already exists, return failure.
4008 */
4009 mutex_enter(&spa_namespace_lock);
ea04106b 4010 if (spa_lookup(poolname) != NULL) {
34dc7c2f 4011 mutex_exit(&spa_namespace_lock);
a08ee875 4012 return (SET_ERROR(EEXIST));
4013 }
4014
4015 /*
4016 * Allocate a new spa_t structure.
4017 */
4018 nvl = fnvlist_alloc();
4019 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
4020 (void) nvlist_lookup_string(props,
4021 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
ea04106b
AX
4022 spa = spa_add(poolname, nvl, altroot);
4023 fnvlist_free(nvl);
fb5f0bc8 4024 spa_activate(spa, spa_mode_global);
34dc7c2f 4025
34dc7c2f 4026 if (props && (error = spa_prop_validate(spa, props))) {
4027 spa_deactivate(spa);
4028 spa_remove(spa);
b128c09f 4029 mutex_exit(&spa_namespace_lock);
4030 return (error);
4031 }
4032
4033 /*
4034 * Temporary pool names should never be written to disk.
4035 */
4036 if (poolname != pool)
4037 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
4038
4039 has_features = B_FALSE;
4040 for (elem = nvlist_next_nvpair(props, NULL);
4041 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
4042 if (zpool_prop_feature(nvpair_name(elem)))
4043 has_features = B_TRUE;
4044 }
4045
4046 if (has_features || nvlist_lookup_uint64(props,
4047 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
34dc7c2f 4048 version = SPA_VERSION;
4049 }
4050 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
4051
4052 spa->spa_first_txg = txg;
4053 spa->spa_uberblock.ub_txg = txg - 1;
4054 spa->spa_uberblock.ub_version = version;
4055 spa->spa_ubsync = spa->spa_uberblock;
cae5b340 4056 spa->spa_load_state = SPA_LOAD_CREATE;
34dc7c2f 4057
4058 /*
4059 * Create "The Godfather" zio to hold all async IOs
4060 */
4061 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
4062 KM_SLEEP);
4063 for (i = 0; i < max_ncpus; i++) {
4064 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
4065 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
4066 ZIO_FLAG_GODFATHER);
4067 }
9babb374 4068
4069 /*
4070 * Create the root vdev.
4071 */
b128c09f 4072 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4073
4074 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
4075
4076 ASSERT(error != 0 || rvd != NULL);
4077 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
4078
4079 if (error == 0 && !zfs_allocatable_devs(nvroot))
a08ee875 4080 error = SET_ERROR(EINVAL);
4081
4082 if (error == 0 &&
4083 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
4084 (error = spa_validate_aux(spa, nvroot, txg,
4085 VDEV_ALLOC_ADD)) == 0) {
d6320ddb 4086 for (c = 0; c < rvd->vdev_children; c++) {
4087 vdev_metaslab_set_size(rvd->vdev_child[c]);
4088 vdev_expand(rvd->vdev_child[c], txg);
4089 }
4090 }
4091
b128c09f 4092 spa_config_exit(spa, SCL_ALL, FTAG);
4093
4094 if (error != 0) {
4095 spa_unload(spa);
4096 spa_deactivate(spa);
4097 spa_remove(spa);
4098 mutex_exit(&spa_namespace_lock);
4099 return (error);
4100 }
4101
4102 /*
4103 * Get the list of spares, if specified.
4104 */
4105 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4106 &spares, &nspares) == 0) {
4107 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
ea04106b 4108 KM_SLEEP) == 0);
4109 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4110 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 4111 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4112 spa_load_spares(spa);
b128c09f 4113 spa_config_exit(spa, SCL_ALL, FTAG);
4114 spa->spa_spares.sav_sync = B_TRUE;
4115 }
4116
4117 /*
4118 * Get the list of level 2 cache devices, if specified.
4119 */
4120 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4121 &l2cache, &nl2cache) == 0) {
4122 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
ea04106b 4123 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4124 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4125 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 4126 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4127 spa_load_l2cache(spa);
b128c09f 4128 spa_config_exit(spa, SCL_ALL, FTAG);
4129 spa->spa_l2cache.sav_sync = B_TRUE;
4130 }
4131
9ae529ec 4132 spa->spa_is_initializing = B_TRUE;
b128c09f 4133 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
34dc7c2f 4134 spa->spa_meta_objset = dp->dp_meta_objset;
9ae529ec 4135 spa->spa_is_initializing = B_FALSE;
34dc7c2f 4136
4137 /*
4138 * Create DDTs (dedup tables).
4139 */
4140 ddt_create(spa);
4141
4142 spa_update_dspace(spa);
4143
4144 tx = dmu_tx_create_assigned(dp, txg);
4145
4146 /*
4147 * Create the pool's history object.
4148 */
4149 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
4150 spa_history_create_obj(spa, tx);
4151
4152 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
4153 spa_history_log_version(spa, "create", tx);
4154
4155 /*
4156 * Create the pool config object.
4157 */
4158 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
b128c09f 4159 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
4160 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
4161
4162 if (zap_add(spa->spa_meta_objset,
4163 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
4164 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
4165 cmn_err(CE_PANIC, "failed to add pool config");
4166 }
4167
4168 if (spa_version(spa) >= SPA_VERSION_FEATURES)
4169 spa_feature_create_zap_objects(spa, tx);
4170
4171 if (zap_add(spa->spa_meta_objset,
4172 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
4173 sizeof (uint64_t), 1, &version, tx) != 0) {
4174 cmn_err(CE_PANIC, "failed to add pool version");
4175 }
4176
4177 /* Newly created pools with the right version are always deflated. */
4178 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
4179 spa->spa_deflate = TRUE;
4180 if (zap_add(spa->spa_meta_objset,
4181 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
4182 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
4183 cmn_err(CE_PANIC, "failed to add deflate");
4184 }
4185 }
4186
4187 /*
428870ff 4188 * Create the deferred-free bpobj. Turn off compression
4189 * because sync-to-convergence takes longer if the blocksize
4190 * keeps changing.
4191 */
4192 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
4193 dmu_object_set_compress(spa->spa_meta_objset, obj,
34dc7c2f 4194 ZIO_COMPRESS_OFF, tx);
34dc7c2f 4195 if (zap_add(spa->spa_meta_objset,
4196 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
4197 sizeof (uint64_t), 1, &obj, tx) != 0) {
4198 cmn_err(CE_PANIC, "failed to add bpobj");
34dc7c2f 4199 }
4200 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
4201 spa->spa_meta_objset, obj));
34dc7c2f 4202
4203 /*
4204 * Generate some random noise for salted checksums to operate on.
4205 */
4206 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
4207 sizeof (spa->spa_cksum_salt.zcs_bytes));
4208
4209 /*
4210 * Set pool properties.
4211 */
4212 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
4213 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4214 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
9babb374 4215 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
cae5b340 4216 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
428870ff 4217
4218 if (props != NULL) {
4219 spa_configfile_set(spa, props, B_FALSE);
a08ee875 4220 spa_sync_props(props, tx);
d164b209 4221 }
4222
4223 dmu_tx_commit(tx);
4224
4225 spa->spa_sync_on = B_TRUE;
4226 txg_sync_start(spa->spa_dsl_pool);
cae5b340 4227 mmp_thread_start(spa);
4228
4229 /*
4230 * We explicitly wait for the first transaction to complete so that our
4231 * bean counters are appropriately updated.
4232 */
4233 txg_wait_synced(spa->spa_dsl_pool, txg);
4234
b128c09f 4235 spa_config_sync(spa, B_FALSE, B_TRUE);
34dc7c2f 4236
4237 /*
4238 * Don't count references from objsets that are already closed
4239 * and are making their way through the eviction process.
4240 */
4241 spa_evicting_os_wait(spa);
b128c09f 4242 spa->spa_minref = refcount_count(&spa->spa_refcount);
cae5b340 4243 spa->spa_load_state = SPA_LOAD_NONE;
b128c09f 4244
4245 mutex_exit(&spa_namespace_lock);
4246
4247 return (0);
4248}
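
To make the expected shape of 'nvroot' concrete, here is a hedged sketch of a caller creating a single-disk pool; in practice this path is reached via the pool-create ioctl, and error handling is elided for brevity:

/*
 * Hypothetical sketch, not part of spa.c: build a minimal root vdev
 * tree (one disk) and create a pool from it.
 */
static int
example_create_pool(const char *pool, const char *path)
{
	nvlist_t *nvroot = fnvlist_alloc();
	nvlist_t *disk = fnvlist_alloc();
	int error;

	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, path);

	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);

	error = spa_create(pool, nvroot, NULL, NULL);

	fnvlist_free(disk);
	fnvlist_free(nvroot);
	return (error);
}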
4249
4250/*
4251 * Import a non-root pool into the system.
4252 */
4253int
a08ee875 4254spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
4255{
4256 spa_t *spa;
4257 char *altroot = NULL;
4258 spa_load_state_t state = SPA_LOAD_IMPORT;
4259 zpool_rewind_policy_t policy;
4260 uint64_t mode = spa_mode_global;
4261 uint64_t readonly = B_FALSE;
9babb374 4262 int error;
4263 nvlist_t *nvroot;
4264 nvlist_t **spares, **l2cache;
4265 uint_t nspares, nl2cache;
4266
4267 /*
4268 * If a pool with this name exists, return failure.
4269 */
4270 mutex_enter(&spa_namespace_lock);
428870ff 4271 if (spa_lookup(pool) != NULL) {
9babb374 4272 mutex_exit(&spa_namespace_lock);
a08ee875 4273 return (SET_ERROR(EEXIST));
4274 }
4275
4276 /*
4277 * Create and initialize the spa structure.
4278 */
4279 (void) nvlist_lookup_string(props,
4280 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4281 (void) nvlist_lookup_uint64(props,
4282 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4283 if (readonly)
4284 mode = FREAD;
428870ff 4285 spa = spa_add(pool, config, altroot);
4286 spa->spa_import_flags = flags;
4287
4288 /*
4289 * Verbatim import - Take a pool and insert it into the namespace
4290 * as if it had been loaded at boot.
4291 */
4292 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
4293 if (props != NULL)
4294 spa_configfile_set(spa, props, B_FALSE);
4295
4296 spa_config_sync(spa, B_FALSE, B_TRUE);
cae5b340 4297 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
4298
4299 mutex_exit(&spa_namespace_lock);
4300 return (0);
4301 }
4302
4303 spa_activate(spa, mode);
34dc7c2f 4304
4305 /*
4306 * Don't start async tasks until we know everything is healthy.
4307 */
4308 spa_async_suspend(spa);
b128c09f 4309
4310 zpool_get_rewind_policy(config, &policy);
4311 if (policy.zrp_request & ZPOOL_DO_REWIND)
4312 state = SPA_LOAD_RECOVER;
4313
34dc7c2f 4314 /*
4315 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
4316 * because the user-supplied config is actually the one to trust when
b128c09f 4317 * doing an import.
34dc7c2f 4318 */
4319 if (state != SPA_LOAD_RECOVER)
4320 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
572e2857 4321
4322 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
4323 policy.zrp_request);
4324
4325 /*
4326 * Propagate anything learned while loading the pool and pass it
4327 * back to caller (i.e. rewind info, missing devices, etc).
428870ff 4328 */
4329 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4330 spa->spa_load_info) == 0);
34dc7c2f 4331
b128c09f 4332 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4333 /*
4334 * Toss any existing sparelist, as it doesn't have any validity
4335 * anymore, and conflicts with spa_has_spare().
34dc7c2f 4336 */
9babb374 4337 if (spa->spa_spares.sav_config) {
4338 nvlist_free(spa->spa_spares.sav_config);
4339 spa->spa_spares.sav_config = NULL;
4340 spa_load_spares(spa);
4341 }
9babb374 4342 if (spa->spa_l2cache.sav_config) {
4343 nvlist_free(spa->spa_l2cache.sav_config);
4344 spa->spa_l2cache.sav_config = NULL;
4345 spa_load_l2cache(spa);
4346 }
4347
4348 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4349 &nvroot) == 0);
b128c09f 4350 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 4351
4352 if (props != NULL)
4353 spa_configfile_set(spa, props, B_FALSE);
4354
4355 if (error != 0 || (props && spa_writeable(spa) &&
4356 (error = spa_prop_set(spa, props)))) {
4357 spa_unload(spa);
4358 spa_deactivate(spa);
4359 spa_remove(spa);
4360 mutex_exit(&spa_namespace_lock);
4361 return (error);
4362 }
4363
4364 spa_async_resume(spa);
4365
4366 /*
4367 * Override any spares and level 2 cache devices as specified by
4368 * the user, as these may have correct device names/devids, etc.
4369 */
4370 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4371 &spares, &nspares) == 0) {
4372 if (spa->spa_spares.sav_config)
4373 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4374 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4375 else
4376 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
ea04106b 4377 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4378 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4379 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 4380 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4381 spa_load_spares(spa);
b128c09f 4382 spa_config_exit(spa, SCL_ALL, FTAG);
4383 spa->spa_spares.sav_sync = B_TRUE;
4384 }
4385 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4386 &l2cache, &nl2cache) == 0) {
4387 if (spa->spa_l2cache.sav_config)
4388 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4389 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4390 else
4391 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
ea04106b 4392 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4393 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4394 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 4395 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4396 spa_load_l2cache(spa);
b128c09f 4397 spa_config_exit(spa, SCL_ALL, FTAG);
4398 spa->spa_l2cache.sav_sync = B_TRUE;
4399 }
4400
4401 /*
4402 * Check for any removed devices.
4403 */
4404 if (spa->spa_autoreplace) {
4405 spa_aux_check_removed(&spa->spa_spares);
4406 spa_aux_check_removed(&spa->spa_l2cache);
4407 }
4408
fb5f0bc8 4409 if (spa_writeable(spa)) {
4410 /*
4411 * Update the config cache to include the newly-imported pool.
4412 */
45d1cae3 4413 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
b128c09f 4414 }
34dc7c2f 4415
34dc7c2f 4416 /*
4417 * It's possible that the pool was expanded while it was exported.
4418 * We kick off an async task to handle this for us.
34dc7c2f 4419 */
9babb374 4420 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
b128c09f 4421
41d74433 4422 spa_history_log_version(spa, "import", NULL);
4423
4424 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
4425
4e820b5a 4426 zvol_create_minors(spa, pool, B_TRUE);
c06d4368 4427
4428 mutex_exit(&spa_namespace_lock);
4429
4430 return (0);
4431}
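
Caller-side, the interesting property is that spa_import() annotates the caller's config with whatever was learned during the load. A hypothetical sketch:

/*
 * Hypothetical sketch, not part of spa.c.
 */
static int
example_import_pool(char *pool, nvlist_t *config)
{
	nvlist_t *loadinfo;
	int error;

	error = spa_import(pool, config, NULL /* props */, 0 /* flags */);

	/* rewind info, missing devices, etc. are attached to 'config' */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
	    &loadinfo) == 0) {
		/* ... report the details back to the user ... */
	}
	return (error);
}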
4432
4433nvlist_t *
4434spa_tryimport(nvlist_t *tryconfig)
4435{
4436 nvlist_t *config = NULL;
4437 char *poolname;
4438 spa_t *spa;
4439 uint64_t state;
d164b209 4440 int error;
4441
4442 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4443 return (NULL);
4444
4445 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4446 return (NULL);
4447
4448 /*
4449 * Create and initialize the spa structure.
4450 */
4451 mutex_enter(&spa_namespace_lock);
428870ff 4452 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
fb5f0bc8 4453 spa_activate(spa, FREAD);
4454
4455 /*
4456 * Pass off the heavy lifting to spa_load().
4457 * Pass TRUE for mosconfig because the user-supplied config
4458 * is actually the one to trust when doing an import.
4459 */
428870ff 4460 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4461
4462 /*
4463 * If 'tryconfig' was at least parsable, return the current config.
4464 */
4465 if (spa->spa_root_vdev != NULL) {
34dc7c2f 4466 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4467 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4468 poolname) == 0);
4469 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4470 state) == 0);
4471 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4472 spa->spa_uberblock.ub_timestamp) == 0);
4473 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4474 spa->spa_load_info) == 0);
4475 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
4476 spa->spa_errata) == 0);
4477
4478 /*
4479 * If the bootfs property exists on this pool then we
4480 * copy it out so that external consumers can tell which
4481 * pools are bootable.
4482 */
d164b209 4483 if ((!error || error == EEXIST) && spa->spa_bootfs) {
ea04106b 4484 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4485
4486 /*
4487 * We have to play games with the name since the
4488 * pool was opened as TRYIMPORT_NAME.
4489 */
b128c09f 4490 if (dsl_dsobj_to_dsname(spa_name(spa),
34dc7c2f
BB
4491 spa->spa_bootfs, tmpname) == 0) {
4492 char *cp;
4493 char *dsname;
4494
ea04106b 4495 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4496
4497 cp = strchr(tmpname, '/');
4498 if (cp == NULL) {
4499 (void) strlcpy(dsname, tmpname,
4500 MAXPATHLEN);
4501 } else {
4502 (void) snprintf(dsname, MAXPATHLEN,
4503 "%s/%s", poolname, ++cp);
4504 }
4505 VERIFY(nvlist_add_string(config,
4506 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4507 kmem_free(dsname, MAXPATHLEN);
4508 }
4509 kmem_free(tmpname, MAXPATHLEN);
4510 }
4511
4512 /*
4513 * Add the list of hot spares and level 2 cache devices.
4514 */
9babb374 4515 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4516 spa_add_spares(spa, config);
4517 spa_add_l2cache(spa, config);
9babb374 4518 spa_config_exit(spa, SCL_CONFIG, FTAG);
4519 }
4520
4521 spa_unload(spa);
4522 spa_deactivate(spa);
4523 spa_remove(spa);
4524 mutex_exit(&spa_namespace_lock);
4525
4526 return (config);
4527}
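
spa_tryimport() is read-only discovery: it reports what an import would see, without leaving a spa_t behind. A hypothetical consumer:

/*
 * Hypothetical sketch, not part of spa.c: probe an on-disk config
 * without importing it.
 */
static void
example_probe_config(nvlist_t *tryconfig)
{
	nvlist_t *config = spa_tryimport(tryconfig);

	if (config == NULL)
		return;		/* tryconfig wasn't even parsable */

	/*
	 * ZPOOL_CONFIG_POOL_NAME, ZPOOL_CONFIG_POOL_STATE, and (if set)
	 * ZPOOL_CONFIG_BOOTFS are now available, as added above.
	 */
	nvlist_free(config);
}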
4528
4529/*
4530 * Pool export/destroy
4531 *
4532 * The act of destroying or exporting a pool is very simple. We make sure there
4533 * is no more pending I/O and any references to the pool are gone. Then, we
4534 * update the pool state and sync all the labels to disk, removing the
4535 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4536 * we don't sync the labels or remove the configuration cache.
4537 */
4538static int
b128c09f 4539spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
fb5f0bc8 4540 boolean_t force, boolean_t hardforce)
4541{
4542 spa_t *spa;
4543
4544 if (oldconfig)
4545 *oldconfig = NULL;
4546
fb5f0bc8 4547 if (!(spa_mode_global & FWRITE))
a08ee875 4548 return (SET_ERROR(EROFS));
4549
4550 mutex_enter(&spa_namespace_lock);
4551 if ((spa = spa_lookup(pool)) == NULL) {
4552 mutex_exit(&spa_namespace_lock);
a08ee875 4553 return (SET_ERROR(ENOENT));
4554 }
4555
4556 /*
4557 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4558 * reacquire the namespace lock, and see if we can export.
4559 */
4560 spa_open_ref(spa, FTAG);
4561 mutex_exit(&spa_namespace_lock);
4562 spa_async_suspend(spa);
4563 if (spa->spa_zvol_taskq) {
4564 zvol_remove_minors(spa, spa_name(spa), B_TRUE);
4565 taskq_wait(spa->spa_zvol_taskq);
4566 }
4567 mutex_enter(&spa_namespace_lock);
4568 spa_close(spa, FTAG);
4569
4570 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
4571 goto export_spa;
34dc7c2f 4572 /*
4573 * The pool will be in core if it's openable, in which case we can
4574 * modify its state. Objsets may be open only because they're dirty,
4575 * so we have to force it to sync before checking spa_refcnt.
34dc7c2f 4576 */
e10b0808 4577 if (spa->spa_sync_on) {
34dc7c2f 4578 txg_wait_synced(spa->spa_dsl_pool, 0);
4579 spa_evicting_os_wait(spa);
4580 }
34dc7c2f 4581
4582 /*
4583 * A pool cannot be exported or destroyed if there are active
4584 * references. If we are resetting a pool, allow references by
4585 * fault injection handlers.
4586 */
4587 if (!spa_refcount_zero(spa) ||
4588 (spa->spa_inject_ref != 0 &&
4589 new_state != POOL_STATE_UNINITIALIZED)) {
4590 spa_async_resume(spa);
4591 mutex_exit(&spa_namespace_lock);
4592 return (SET_ERROR(EBUSY));
4593 }
34dc7c2f 4594
ea04106b 4595 if (spa->spa_sync_on) {
4596 /*
4597 * A pool cannot be exported if it has an active shared spare.
 4598 		 * This is to prevent other pools from stealing the active spare
 4599 		 * from an exported pool. At the user's discretion, such a pool can
 4600 		 * be forcibly exported.
4601 */
4602 if (!force && new_state == POOL_STATE_EXPORTED &&
4603 spa_has_active_shared_spare(spa)) {
4604 spa_async_resume(spa);
4605 mutex_exit(&spa_namespace_lock);
a08ee875 4606 return (SET_ERROR(EXDEV));
b128c09f 4607 }
4608
4609 /*
4610 * We want this to be reflected on every label,
4611 * so mark them all dirty. spa_unload() will do the
4612 * final sync that pushes these changes out.
4613 */
fb5f0bc8 4614 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
b128c09f 4615 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4616 spa->spa_state = new_state;
4617 spa->spa_final_txg = spa_last_synced_txg(spa) +
4618 TXG_DEFER_SIZE + 1;
34dc7c2f 4619 vdev_config_dirty(spa->spa_root_vdev);
b128c09f 4620 spa_config_exit(spa, SCL_ALL, FTAG);
4621 }
4622 }
4623
ea04106b 4624export_spa:
4625 if (new_state == POOL_STATE_DESTROYED)
4626 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
4627 else if (new_state == POOL_STATE_EXPORTED)
4628 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
4629
4630 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4631 spa_unload(spa);
4632 spa_deactivate(spa);
4633 }
4634
4635 if (oldconfig && spa->spa_config)
4636 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4637
4638 if (new_state != POOL_STATE_UNINITIALIZED) {
4639 if (!hardforce)
4640 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 4641 spa_remove(spa);
4642 }
4643 mutex_exit(&spa_namespace_lock);
4644
4645 return (0);
4646}
4647
4648/*
4649 * Destroy a storage pool.
4650 */
4651int
4652spa_destroy(char *pool)
4653{
4654 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4655 B_FALSE, B_FALSE));
4656}
4657
4658/*
4659 * Export a storage pool.
4660 */
4661int
4662spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4663 boolean_t hardforce)
34dc7c2f 4664{
4665 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4666 force, hardforce));
4667}
4668
4669/*
4670 * Similar to spa_export(), this unloads the spa_t without actually removing it
4671 * from the namespace in any way.
4672 */
4673int
4674spa_reset(char *pool)
4675{
b128c09f 4676 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
fb5f0bc8 4677 B_FALSE, B_FALSE));
4678}
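
All three wrappers reduce to spa_export_common() with a different target state. A hypothetical caller showing the EXDEV retry discussed above (an active shared spare blocks a non-forced export):

/*
 * Hypothetical sketch, not part of spa.c.
 */
static int
example_export_pool(char *pool)
{
	int error;

	error = spa_export(pool, NULL, B_FALSE, B_FALSE);
	if (error == EXDEV)	/* active shared spare; retry with force */
		error = spa_export(pool, NULL, B_TRUE, B_FALSE);
	return (error);
}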
4679
4680/*
4681 * ==========================================================================
4682 * Device manipulation
4683 * ==========================================================================
4684 */
4685
4686/*
4687 * Add a device to a storage pool.
4688 */
4689int
4690spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4691{
428870ff 4692 uint64_t txg, id;
fb5f0bc8 4693 int error;
4694 vdev_t *rvd = spa->spa_root_vdev;
4695 vdev_t *vd, *tvd;
4696 nvlist_t **spares, **l2cache;
4697 uint_t nspares, nl2cache;
d6320ddb 4698 int c;
34dc7c2f 4699
4700 ASSERT(spa_writeable(spa));
4701
4702 txg = spa_vdev_enter(spa);
4703
4704 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4705 VDEV_ALLOC_ADD)) != 0)
4706 return (spa_vdev_exit(spa, NULL, txg, error));
4707
b128c09f 4708 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
4709
4710 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4711 &nspares) != 0)
4712 nspares = 0;
4713
4714 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4715 &nl2cache) != 0)
4716 nl2cache = 0;
4717
b128c09f 4718 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
34dc7c2f 4719 return (spa_vdev_exit(spa, vd, txg, EINVAL));
34dc7c2f 4720
4721 if (vd->vdev_children != 0 &&
4722 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4723 return (spa_vdev_exit(spa, vd, txg, error));
4724
4725 /*
4726 * We must validate the spares and l2cache devices after checking the
4727 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4728 */
b128c09f 4729 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
34dc7c2f 4730 return (spa_vdev_exit(spa, vd, txg, error));
4731
4732 /*
4733 * Transfer each new top-level vdev from vd to rvd.
4734 */
d6320ddb 4735 for (c = 0; c < vd->vdev_children; c++) {
4736
4737 /*
4738 * Set the vdev id to the first hole, if one exists.
4739 */
4740 for (id = 0; id < rvd->vdev_children; id++) {
4741 if (rvd->vdev_child[id]->vdev_ishole) {
4742 vdev_free(rvd->vdev_child[id]);
4743 break;
4744 }
4745 }
4746 tvd = vd->vdev_child[c];
4747 vdev_remove_child(vd, tvd);
428870ff 4748 tvd->vdev_id = id;
4749 vdev_add_child(rvd, tvd);
4750 vdev_config_dirty(tvd);
4751 }
4752
4753 if (nspares != 0) {
4754 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4755 ZPOOL_CONFIG_SPARES);
4756 spa_load_spares(spa);
4757 spa->spa_spares.sav_sync = B_TRUE;
4758 }
4759
4760 if (nl2cache != 0) {
4761 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4762 ZPOOL_CONFIG_L2CACHE);
4763 spa_load_l2cache(spa);
4764 spa->spa_l2cache.sav_sync = B_TRUE;
4765 }
4766
4767 /*
4768 * We have to be careful when adding new vdevs to an existing pool.
4769 * If other threads start allocating from these vdevs before we
4770 * sync the config cache, and we lose power, then upon reboot we may
4771 * fail to open the pool because there are DVAs that the config cache
4772 * can't translate. Therefore, we first add the vdevs without
4773 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4774 * and then let spa_config_update() initialize the new metaslabs.
4775 *
4776 * spa_load() checks for added-but-not-initialized vdevs, so that
4777 * if we lose power at any point in this sequence, the remaining
4778 * steps will be completed the next time we load the pool.
4779 */
4780 (void) spa_vdev_exit(spa, vd, txg, 0);
4781
4782 mutex_enter(&spa_namespace_lock);
4783 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
cae5b340 4784 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
4785 mutex_exit(&spa_namespace_lock);
4786
4787 return (0);
4788}
4789
4790/*
 4791  * Attach a device to a mirror. The arguments are the guid of any device
 4792  * in the mirror, and the nvroot for the new device. If the guid specifies
 4793  * a device that is not mirrored, we automatically insert the mirror vdev.
4794 *
4795 * If 'replacing' is specified, the new device is intended to replace the
4796 * existing device; in this case the two devices are made into their own
4797 * mirror using the 'replacing' vdev, which is functionally identical to
4798 * the mirror vdev (it actually reuses all the same ops) but has a few
4799 * extra rules: you can't attach to it after it's been created, and upon
4800 * completion of resilvering, the first disk (the one being replaced)
4801 * is automatically detached.
4802 */
4803int
4804spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4805{
428870ff 4806 uint64_t txg, dtl_max_txg;
4807 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4808 vdev_ops_t *pvops;
4809 char *oldvdpath, *newvdpath;
4810 int newvd_isspare;
4811 int error;
a08ee875 4812 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
34dc7c2f 4813
4814 ASSERT(spa_writeable(spa));
4815
4816 txg = spa_vdev_enter(spa);
4817
b128c09f 4818 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4819
4820 if (oldvd == NULL)
4821 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4822
4823 if (!oldvd->vdev_ops->vdev_op_leaf)
4824 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4825
4826 pvd = oldvd->vdev_parent;
4827
4828 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
5ffb9d1d 4829 VDEV_ALLOC_ATTACH)) != 0)
4830 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4831
4832 if (newrootvd->vdev_children != 1)
4833 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4834
4835 newvd = newrootvd->vdev_child[0];
4836
4837 if (!newvd->vdev_ops->vdev_op_leaf)
4838 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4839
4840 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4841 return (spa_vdev_exit(spa, newrootvd, txg, error));
4842
4843 /*
4844 * Spares can't replace logs
4845 */
b128c09f 4846 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4847 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4848
4849 if (!replacing) {
4850 /*
4851 * For attach, the only allowable parent is a mirror or the root
4852 * vdev.
4853 */
4854 if (pvd->vdev_ops != &vdev_mirror_ops &&
4855 pvd->vdev_ops != &vdev_root_ops)
4856 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4857
4858 pvops = &vdev_mirror_ops;
4859 } else {
4860 /*
4861 * Active hot spares can only be replaced by inactive hot
4862 * spares.
4863 */
4864 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857 4865 oldvd->vdev_isspare &&
34dc7c2f
BB
4866 !spa_has_spare(spa, newvd->vdev_guid))
4867 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4868
4869 /*
4870 * If the source is a hot spare, and the parent isn't already a
4871 * spare, then we want to create a new hot spare. Otherwise, we
4872 * want to create a replacing vdev. The user is not allowed to
4873 * attach to a spared vdev child unless the 'isspare' state is
4874 * the same (spare replaces spare, non-spare replaces
4875 * non-spare).
4876 */
4877 if (pvd->vdev_ops == &vdev_replacing_ops &&
4878 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
34dc7c2f 4879 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4880 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4881 newvd->vdev_isspare != oldvd->vdev_isspare) {
34dc7c2f 4882 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4883 }
4884
4885 if (newvd->vdev_isspare)
4886 pvops = &vdev_spare_ops;
4887 else
4888 pvops = &vdev_replacing_ops;
4889 }
4890
4891 /*
9babb374 4892 * Make sure the new device is big enough.
34dc7c2f 4893 */
9babb374 4894 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4895 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4896
4897 /*
4898 * The new device cannot have a higher alignment requirement
4899 * than the top-level vdev.
4900 */
4901 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4902 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4903
4904 /*
4905 * If this is an in-place replacement, update oldvd's path and devid
4906 * to make it distinguishable from newvd, and unopenable from now on.
4907 */
4908 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4909 spa_strfree(oldvd->vdev_path);
4910 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
ea04106b 4911 KM_SLEEP);
4912 (void) sprintf(oldvd->vdev_path, "%s/%s",
4913 newvd->vdev_path, "old");
4914 if (oldvd->vdev_devid != NULL) {
4915 spa_strfree(oldvd->vdev_devid);
4916 oldvd->vdev_devid = NULL;
4917 }
4918 }
4919
572e2857 4920 /* mark the device being resilvered */
a08ee875 4921 newvd->vdev_resilver_txg = txg;
572e2857 4922
4923 /*
4924 * If the parent is not a mirror, or if we're replacing, insert the new
4925 * mirror/replacing/spare vdev above oldvd.
4926 */
4927 if (pvd->vdev_ops != pvops)
4928 pvd = vdev_add_parent(oldvd, pvops);
4929
4930 ASSERT(pvd->vdev_top->vdev_parent == rvd);
4931 ASSERT(pvd->vdev_ops == pvops);
4932 ASSERT(oldvd->vdev_parent == pvd);
4933
4934 /*
4935 * Extract the new device from its root and add it to pvd.
4936 */
4937 vdev_remove_child(newrootvd, newvd);
4938 newvd->vdev_id = pvd->vdev_children;
428870ff 4939 newvd->vdev_crtxg = oldvd->vdev_crtxg;
34dc7c2f
BB
4940 vdev_add_child(pvd, newvd);
4941
4942 /*
4943 * Reevaluate the parent vdev state.
4944 */
4945 vdev_propagate_state(pvd);
4946
4947 tvd = newvd->vdev_top;
4948 ASSERT(pvd->vdev_top == tvd);
4949 ASSERT(tvd->vdev_parent == rvd);
4950
4951 vdev_config_dirty(tvd);
4952
4953 /*
4954 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4955 * for any dmu_sync-ed blocks. It will propagate upward when
4956 * spa_vdev_exit() calls vdev_dtl_reassess().
34dc7c2f 4957 */
428870ff 4958 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
34dc7c2f 4959
4960 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4961 dtl_max_txg - TXG_INITIAL);
34dc7c2f 4962
9babb374 4963 if (newvd->vdev_isspare) {
34dc7c2f 4964 spa_spare_activate(newvd);
cae5b340 4965 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
4966 }
4967
4968 oldvdpath = spa_strdup(oldvd->vdev_path);
4969 newvdpath = spa_strdup(newvd->vdev_path);
4970 newvd_isspare = newvd->vdev_isspare;
4971
4972 /*
4973 * Mark newvd's DTL dirty in this txg.
4974 */
4975 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4976
428870ff 4977 /*
4978 * Schedule the resilver to restart in the future. We do this to
4979 * ensure that dmu_sync-ed blocks have been stitched into the
4980 * respective datasets.
4981 */
4982 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4983
4984 if (spa->spa_bootfs)
4985 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4986
4987 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
4988
4989 /*
4990 * Commit the config
4991 */
4992 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
34dc7c2f 4993
a08ee875 4994 spa_history_log_internal(spa, "vdev attach", NULL,
428870ff 4995 "%s vdev=%s %s vdev=%s",
4996 replacing && newvd_isspare ? "spare in" :
4997 replacing ? "replace" : "attach", newvdpath,
4998 replacing ? "for" : "to", oldvdpath);
4999
5000 spa_strfree(oldvdpath);
5001 spa_strfree(newvdpath);
5002
5003 return (0);
5004}
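
A hedged sketch of the caller side of a replace: the new device arrives as a one-disk nvroot, and 'replacing' selects the replacing vdev rather than a plain mirror attach:

/*
 * Hypothetical sketch, not part of spa.c: replace leaf 'guid' with a
 * new disk at 'newpath'.
 */
static int
example_replace_vdev(spa_t *spa, uint64_t guid, const char *newpath)
{
	nvlist_t *nvroot = fnvlist_alloc();
	nvlist_t *disk = fnvlist_alloc();
	int error;

	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, newpath);
	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);

	error = spa_vdev_attach(spa, guid, nvroot, B_TRUE);

	fnvlist_free(disk);
	fnvlist_free(nvroot);
	return (error);
}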
5005
5006/*
5007 * Detach a device from a mirror or replacing vdev.
a08ee875 5008 *
5009 * If 'replace_done' is specified, only detach if the parent
5010 * is a replacing vdev.
5011 */
5012int
fb5f0bc8 5013spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
5014{
5015 uint64_t txg;
fb5f0bc8 5016 int error;
34dc7c2f
BB
5017 vdev_t *vd, *pvd, *cvd, *tvd;
5018 boolean_t unspare = B_FALSE;
d4ed6673 5019 uint64_t unspare_guid = 0;
428870ff 5020 char *vdpath;
d6320ddb 5021 int c, t;
a08ee875 5022 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
5023 ASSERT(spa_writeable(spa));
5024
5025 txg = spa_vdev_enter(spa);
5026
b128c09f 5027 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
5028
5029 if (vd == NULL)
5030 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5031
5032 if (!vd->vdev_ops->vdev_op_leaf)
5033 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5034
5035 pvd = vd->vdev_parent;
5036
5037 /*
5038 * If the parent/child relationship is not as expected, don't do it.
5039 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
5040 * vdev that's replacing B with C. The user's intent in replacing
5041 * is to go from M(A,B) to M(A,C). If the user decides to cancel
5042 * the replace by detaching C, the expected behavior is to end up
5043 * M(A,B). But suppose that right after deciding to detach C,
5044 * the replacement of B completes. We would have M(A,C), and then
5045 * ask to detach C, which would leave us with just A -- not what
5046 * the user wanted. To prevent this, we make sure that the
5047 * parent/child relationship hasn't changed -- in this example,
5048 * that C's parent is still the replacing vdev R.
5049 */
5050 if (pvd->vdev_guid != pguid && pguid != 0)
5051 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5052
34dc7c2f 5053 /*
572e2857 5054 * Only 'replacing' or 'spare' vdevs can be replaced.
34dc7c2f 5055 */
5056 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
5057 pvd->vdev_ops != &vdev_spare_ops)
5058 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5059
5060 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
5061 spa_version(spa) >= SPA_VERSION_SPARES);
5062
5063 /*
5064 * Only mirror, replacing, and spare vdevs support detach.
5065 */
5066 if (pvd->vdev_ops != &vdev_replacing_ops &&
5067 pvd->vdev_ops != &vdev_mirror_ops &&
5068 pvd->vdev_ops != &vdev_spare_ops)
5069 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5070
5071 /*
fb5f0bc8
BB
5072 * If this device has the only valid copy of some data,
5073 * we cannot safely detach it.
34dc7c2f 5074 */
fb5f0bc8 5075 if (vdev_dtl_required(vd))
5076 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5077
fb5f0bc8 5078 ASSERT(pvd->vdev_children >= 2);
34dc7c2f 5079
5080 /*
5081 * If we are detaching the second disk from a replacing vdev, then
5082 * check to see if we changed the original vdev's path to have "/old"
5083 * at the end in spa_vdev_attach(). If so, undo that change now.
5084 */
5085 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
5086 vd->vdev_path != NULL) {
5087 size_t len = strlen(vd->vdev_path);
5088
d6320ddb 5089 for (c = 0; c < pvd->vdev_children; c++) {
5090 cvd = pvd->vdev_child[c];
5091
5092 if (cvd == vd || cvd->vdev_path == NULL)
5093 continue;
5094
5095 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
5096 strcmp(cvd->vdev_path + len, "/old") == 0) {
5097 spa_strfree(cvd->vdev_path);
5098 cvd->vdev_path = spa_strdup(vd->vdev_path);
5099 break;
5100 }
b128c09f
BB
5101 }
5102 }
5103
5104 /*
5105 * If we are detaching the original disk from a spare, then it implies
5106 * that the spare should become a real disk, and be removed from the
5107 * active spare list for the pool.
5108 */
5109 if (pvd->vdev_ops == &vdev_spare_ops &&
5110 vd->vdev_id == 0 &&
5111 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
5112 unspare = B_TRUE;
5113
5114 /*
5115 * Erase the disk labels so the disk can be used for other things.
5116 * This must be done after all other error cases are handled,
5117 * but before we disembowel vd (so we can still do I/O to it).
5118 * But if we can't do it, don't treat the error as fatal --
5119 * it may be that the unwritability of the disk is the reason
5120 * it's being detached!
5121 */
5122 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5123
5124 /*
5125 * Remove vd from its parent and compact the parent's children.
5126 */
5127 vdev_remove_child(pvd, vd);
5128 vdev_compact_children(pvd);
5129
5130 /*
5131 * Remember one of the remaining children so we can get tvd below.
5132 */
572e2857 5133 cvd = pvd->vdev_child[pvd->vdev_children - 1];
5134
5135 /*
5136 * If we need to remove the remaining child from the list of hot spares,
5137 * do it now, marking the vdev as no longer a spare in the process.
5138 * We must do this before vdev_remove_parent(), because that can
5139 * change the GUID if it creates a new toplevel GUID. For a similar
5140 * reason, we must remove the spare now, in the same txg as the detach;
5141 * otherwise someone could attach a new sibling, change the GUID, and
5142 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
5143 */
5144 if (unspare) {
5145 ASSERT(cvd->vdev_isspare);
5146 spa_spare_remove(cvd);
5147 unspare_guid = cvd->vdev_guid;
fb5f0bc8 5148 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
572e2857 5149 cvd->vdev_unspare = B_TRUE;
34dc7c2f
BB
5150 }
5151
5152 /*
5153 * If the parent mirror/replacing vdev only has one child,
5154 * the parent is no longer needed. Remove it from the tree.
5155 */
5156 if (pvd->vdev_children == 1) {
5157 if (pvd->vdev_ops == &vdev_spare_ops)
5158 cvd->vdev_unspare = B_FALSE;
428870ff 5159 vdev_remove_parent(cvd);
5160 }
5161
5162
5163 /*
5164 * We don't set tvd until now because the parent we just removed
5165 * may have been the previous top-level vdev.
5166 */
5167 tvd = cvd->vdev_top;
5168 ASSERT(tvd->vdev_parent == rvd);
5169
5170 /*
5171 * Reevaluate the parent vdev state.
5172 */
5173 vdev_propagate_state(cvd);
5174
5175 /*
5176 * If the 'autoexpand' property is set on the pool then automatically
5177 * try to expand the size of the pool. For example if the device we
5178 * just detached was smaller than the others, it may be possible to
5179 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
5180 * first so that we can obtain the updated sizes of the leaf vdevs.
5181 */
5182 if (spa->spa_autoexpand) {
5183 vdev_reopen(tvd);
5184 vdev_expand(tvd, txg);
5185 }
5186
5187 vdev_config_dirty(tvd);
5188
5189 /*
5190 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
5191 * vd->vdev_detached is set and free vd's DTL object in syncing context.
5192 * But first make sure we're not on any *other* txg's DTL list, to
5193 * prevent vd from being accessed after it's freed.
5194 */
cae5b340 5195 vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
d6320ddb 5196 for (t = 0; t < TXG_SIZE; t++)
428870ff
BB
5197 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
5198 vd->vdev_detached = B_TRUE;
5199 vdev_dirty(tvd, VDD_DTL, vd, txg);
5200
cae5b340 5201 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
428870ff 5202
5203 /* hang on to the spa before we release the lock */
5204 spa_open_ref(spa, FTAG);
5205
5206 error = spa_vdev_exit(spa, vd, txg, 0);
5207
a08ee875 5208 spa_history_log_internal(spa, "detach", NULL,
5209 "vdev=%s", vdpath);
5210 spa_strfree(vdpath);
5211
5212 /*
5213 * If this was the removal of the original device in a hot spare vdev,
5214 * then we want to go through and remove the device from the hot spare
5215 * list of every other pool.
5216 */
5217 if (unspare) {
5218 spa_t *altspa = NULL;
5219
428870ff 5220 mutex_enter(&spa_namespace_lock);
5221 while ((altspa = spa_next(altspa)) != NULL) {
5222 if (altspa->spa_state != POOL_STATE_ACTIVE ||
5223 altspa == spa)
428870ff 5224 continue;
5225
5226 spa_open_ref(altspa, FTAG);
428870ff 5227 mutex_exit(&spa_namespace_lock);
572e2857 5228 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
428870ff 5229 mutex_enter(&spa_namespace_lock);
572e2857 5230 spa_close(altspa, FTAG);
5231 }
5232 mutex_exit(&spa_namespace_lock);
5233
5234 /* search the rest of the vdevs for spares to remove */
5235 spa_vdev_resilver_done(spa);
5236 }
5237
5238 /* all done with the spa; OK to release */
5239 mutex_enter(&spa_namespace_lock);
5240 spa_close(spa, FTAG);
5241 mutex_exit(&spa_namespace_lock);
5242
5243 return (error);
5244}
5245
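As a concrete illustration of the "/old" handling in spa_vdev_detach() above, here is a minimal standalone sketch of the same suffix test. It compiles in userland with plain C strings standing in for the vdev_path fields; the function name and harness are illustrative, not part of spa.c.

#include <stdio.h>
#include <string.h>

/* A sibling matches when its path equals the detached vdev's path + "/old". */
static int
is_old_sibling(const char *vd_path, const char *cvd_path)
{
	size_t len = strlen(vd_path);

	return (strncmp(cvd_path, vd_path, len) == 0 &&
	    strcmp(cvd_path + len, "/old") == 0);
}

int
main(void)
{
	printf("%d\n", is_old_sibling("/dev/sda1", "/dev/sda1/old"));	/* 1 */
	printf("%d\n", is_old_sibling("/dev/sda1", "/dev/sdb1"));	/* 0 */
	return (0);
}

On a match, the real code above frees the sibling's stored path and restores the original name via spa_strdup().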
5246 /*
5247 * Split a set of devices from their mirrors, and create a new pool from them.
5248 */
5249int
5250spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
5251 nvlist_t *props, boolean_t exp)
5252{
5253 int error = 0;
5254 uint64_t txg, *glist;
5255 spa_t *newspa;
5256 uint_t c, children, lastlog;
5257 nvlist_t **child, *nvl, *tmp;
5258 dmu_tx_t *tx;
5259 char *altroot = NULL;
5260 vdev_t *rvd, **vml = NULL; /* vdev modify list */
5261 boolean_t activate_slog;
5262
572e2857 5263 ASSERT(spa_writeable(spa));
5264
5265 txg = spa_vdev_enter(spa);
5266
5267 /* clear the log and flush everything up to now */
5268 activate_slog = spa_passivate_log(spa);
5269 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5270 error = spa_offline_log(spa);
5271 txg = spa_vdev_config_enter(spa);
5272
5273 if (activate_slog)
5274 spa_activate_log(spa);
5275
5276 if (error != 0)
5277 return (spa_vdev_exit(spa, NULL, txg, error));
5278
5279 /* check new spa name before going any further */
5280 if (spa_lookup(newname) != NULL)
5281 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
5282
5283 /*
5284 * scan through all the children to ensure they're all mirrors
5285 */
5286 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
5287 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
5288 &children) != 0)
5289 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5290
5291 /* first, check to ensure we've got the right child count */
5292 rvd = spa->spa_root_vdev;
5293 lastlog = 0;
5294 for (c = 0; c < rvd->vdev_children; c++) {
5295 vdev_t *vd = rvd->vdev_child[c];
5296
5297 /* don't count the holes & logs as children */
5298 if (vd->vdev_islog || vd->vdev_ishole) {
5299 if (lastlog == 0)
5300 lastlog = c;
5301 continue;
5302 }
5303
5304 lastlog = 0;
5305 }
5306 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
5307 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5308
5309 /* next, ensure no spare or cache devices are part of the split */
5310 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
5311 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
5312 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5313
5314 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
5315 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
5316
5317 /* then, loop over each vdev and validate it */
5318 for (c = 0; c < children; c++) {
5319 uint64_t is_hole = 0;
5320
5321 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
5322 &is_hole);
5323
5324 if (is_hole != 0) {
5325 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
5326 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
5327 continue;
5328 } else {
a08ee875 5329 error = SET_ERROR(EINVAL);
5330 break;
5331 }
5332 }
5333
5334 /* which disk is going to be split? */
5335 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
5336 &glist[c]) != 0) {
a08ee875 5337 error = SET_ERROR(EINVAL);
5338 break;
5339 }
5340
5341 /* look it up in the spa */
5342 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
5343 if (vml[c] == NULL) {
a08ee875 5344 error = SET_ERROR(ENODEV);
5345 break;
5346 }
5347
5348 /* make sure there's nothing stopping the split */
5349 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
5350 vml[c]->vdev_islog ||
5351 vml[c]->vdev_ishole ||
5352 vml[c]->vdev_isspare ||
5353 vml[c]->vdev_isl2cache ||
5354 !vdev_writeable(vml[c]) ||
5355 vml[c]->vdev_children != 0 ||
5356 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
5357 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
a08ee875 5358 error = SET_ERROR(EINVAL);
5359 break;
5360 }
5361
5362 if (vdev_dtl_required(vml[c])) {
a08ee875 5363 error = SET_ERROR(EBUSY);
5364 break;
5365 }
5366
5367 /* we need certain info from the top level */
5368 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
5369 vml[c]->vdev_top->vdev_ms_array) == 0);
5370 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
5371 vml[c]->vdev_top->vdev_ms_shift) == 0);
5372 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
5373 vml[c]->vdev_top->vdev_asize) == 0);
5374 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
5375 vml[c]->vdev_top->vdev_ashift) == 0);
5376
5377 /* transfer per-vdev ZAPs */
5378 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
5379 VERIFY0(nvlist_add_uint64(child[c],
5380 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
5381
5382 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
5383 VERIFY0(nvlist_add_uint64(child[c],
5384 ZPOOL_CONFIG_VDEV_TOP_ZAP,
5385 vml[c]->vdev_parent->vdev_top_zap));
5386 }
5387
5388 if (error != 0) {
5389 kmem_free(vml, children * sizeof (vdev_t *));
5390 kmem_free(glist, children * sizeof (uint64_t));
5391 return (spa_vdev_exit(spa, NULL, txg, error));
5392 }
5393
5394 /* stop writers from using the disks */
5395 for (c = 0; c < children; c++) {
5396 if (vml[c] != NULL)
5397 vml[c]->vdev_offline = B_TRUE;
5398 }
5399 vdev_reopen(spa->spa_root_vdev);
5400
5401 /*
5402 * Temporarily record the splitting vdevs in the spa config. This
5403 * will disappear once the config is regenerated.
34dc7c2f 5404 */
ea04106b 5405 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5406 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5407 glist, children) == 0);
5408 kmem_free(glist, children * sizeof (uint64_t));
34dc7c2f 5409
5410 mutex_enter(&spa->spa_props_lock);
5411 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5412 nvl) == 0);
5413 mutex_exit(&spa->spa_props_lock);
5414 spa->spa_config_splitting = nvl;
5415 vdev_config_dirty(spa->spa_root_vdev);
5416
5417 /* configure and create the new pool */
5418 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5419 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5420 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5421 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5422 spa_version(spa)) == 0);
5423 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5424 spa->spa_config_txg) == 0);
5425 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5426 spa_generate_guid(NULL)) == 0);
cae5b340 5427 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
5428 (void) nvlist_lookup_string(props,
5429 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
34dc7c2f 5430
5431 /* add the new pool to the namespace */
5432 newspa = spa_add(newname, config, altroot);
cae5b340 5433 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
5434 newspa->spa_config_txg = spa->spa_config_txg;
5435 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5436
5437 /* release the spa config lock, retaining the namespace lock */
5438 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5439
5440 if (zio_injection_enabled)
5441 zio_handle_panic_injection(spa, FTAG, 1);
5442
5443 spa_activate(newspa, spa_mode_global);
5444 spa_async_suspend(newspa);
5445
5446 /* create the new pool from the disks of the original pool */
5447 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5448 if (error)
5449 goto out;
5450
5451 /* if that worked, generate a real config for the new pool */
5452 if (newspa->spa_root_vdev != NULL) {
5453 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
ea04106b 5454 NV_UNIQUE_NAME, KM_SLEEP) == 0);
5455 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5456 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5457 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5458 B_TRUE));
9babb374 5459 }
34dc7c2f 5460
5461 /* set the props */
5462 if (props != NULL) {
5463 spa_configfile_set(newspa, props, B_FALSE);
5464 error = spa_prop_set(newspa, props);
5465 if (error)
5466 goto out;
5467 }
34dc7c2f 5468
5469 /* flush everything */
5470 txg = spa_vdev_config_enter(newspa);
5471 vdev_config_dirty(newspa->spa_root_vdev);
5472 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
34dc7c2f 5473
5474 if (zio_injection_enabled)
5475 zio_handle_panic_injection(spa, FTAG, 2);
34dc7c2f 5476
428870ff 5477 spa_async_resume(newspa);
34dc7c2f 5478
5479 /* finally, update the original pool's config */
5480 txg = spa_vdev_config_enter(spa);
5481 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5482 error = dmu_tx_assign(tx, TXG_WAIT);
5483 if (error != 0)
5484 dmu_tx_abort(tx);
5485 for (c = 0; c < children; c++) {
5486 if (vml[c] != NULL) {
5487 vdev_split(vml[c]);
5488 if (error == 0)
5489 spa_history_log_internal(spa, "detach", tx,
5490 "vdev=%s", vml[c]->vdev_path);
cae5b340 5491
428870ff 5492 vdev_free(vml[c]);
34dc7c2f 5493 }
34dc7c2f 5494 }
cae5b340 5495 spa->spa_avz_action = AVZ_ACTION_REBUILD;
5496 vdev_config_dirty(spa->spa_root_vdev);
5497 spa->spa_config_splitting = NULL;
5498 nvlist_free(nvl);
5499 if (error == 0)
5500 dmu_tx_commit(tx);
5501 (void) spa_vdev_exit(spa, NULL, txg, 0);
5502
5503 if (zio_injection_enabled)
5504 zio_handle_panic_injection(spa, FTAG, 3);
5505
5506 /* split is complete; log a history record */
5507 spa_history_log_internal(newspa, "split", NULL,
5508 "from pool %s", spa_name(spa));
5509
5510 kmem_free(vml, children * sizeof (vdev_t *));
5511
5512 /* if we're not going to mount the filesystems in userland, export */
5513 if (exp)
5514 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5515 B_FALSE, B_FALSE);
5516
5517 return (error);
5518
5519out:
5520 spa_unload(newspa);
5521 spa_deactivate(newspa);
5522 spa_remove(newspa);
5523
5524 txg = spa_vdev_config_enter(spa);
5525
5526 /* re-online all offlined disks */
5527 for (c = 0; c < children; c++) {
5528 if (vml[c] != NULL)
5529 vml[c]->vdev_offline = B_FALSE;
5530 }
5531 vdev_reopen(spa->spa_root_vdev);
5532
5533 nvlist_free(spa->spa_config_splitting);
5534 spa->spa_config_splitting = NULL;
5535 (void) spa_vdev_exit(spa, NULL, txg, error);
34dc7c2f 5536
428870ff 5537 kmem_free(vml, children * sizeof (vdev_t *));
5538 return (error);
5539}
5540
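The child-count validation at the top of spa_vdev_split_mirror() is subtle: a trailing run of log/hole vdevs is excluded from the number of children the caller must supply, but a log in the middle of the tree resets the run. A standalone sketch of just that accounting follows; the flags array is a hypothetical stand-in for the vdev_islog/vdev_ishole checks.

#include <stdio.h>

static unsigned
expected_children(const int *is_log_or_hole, unsigned nchildren)
{
	unsigned c, lastlog = 0;

	for (c = 0; c < nchildren; c++) {
		if (is_log_or_hole[c]) {
			if (lastlog == 0)
				lastlog = c;	/* possible start of a trailing run */
			continue;
		}
		lastlog = 0;			/* the run was not trailing */
	}
	return (lastlog != 0 ? lastlog : nchildren);
}

int
main(void)
{
	int flags[] = { 0, 0, 1, 1 };	/* two mirrors followed by two logs */

	/* the caller must name exactly 2 children for this split */
	printf("%u\n", expected_children(flags, 4));
	return (0);
}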
5541static nvlist_t *
5542spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
34dc7c2f 5543{
5544 int i;
5545
5546 for (i = 0; i < count; i++) {
b128c09f 5547 uint64_t guid;
34dc7c2f 5548
5549 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5550 &guid) == 0);
34dc7c2f 5551
5552 if (guid == target_guid)
5553 return (nvpp[i]);
5554 }
5555
b128c09f 5556 return (NULL);
5557}
5558
5559static void
5560spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
cae5b340 5561 nvlist_t *dev_to_remove)
34dc7c2f 5562{
b128c09f 5563 nvlist_t **newdev = NULL;
d6320ddb 5564 int i, j;
34dc7c2f 5565
b128c09f 5566 if (count > 1)
ea04106b 5567 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
34dc7c2f 5568
d6320ddb 5569 for (i = 0, j = 0; i < count; i++) {
5570 if (dev[i] == dev_to_remove)
5571 continue;
ea04106b 5572 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5573 }
5574
5575 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5576 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
34dc7c2f 5577
d6320ddb 5578 for (i = 0; i < count - 1; i++)
b128c09f 5579 nvlist_free(newdev[i]);
34dc7c2f 5580
5581 if (count > 1)
5582 kmem_free(newdev, (count - 1) * sizeof (void *));
5583}
5584
5585 /*
5586 * Evacuate the device.
5587 */
5588static int
5589spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5590{
5591 uint64_t txg;
5592 int error = 0;
5593
5594 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5595 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5596 ASSERT(vd == vd->vdev_top);
5597
5598 /*
5599 * Evacuate the device. We don't hold the config lock as writer
5600 * since we need to do I/O, but we do keep the
5601 * spa_namespace_lock held. Once this completes, the device
5602 * should no longer have any blocks allocated on it.
5603 */
5604 if (vd->vdev_islog) {
5605 if (vd->vdev_stat.vs_alloc != 0)
5606 error = spa_offline_log(spa);
5607 } else {
a08ee875 5608 error = SET_ERROR(ENOTSUP);
5609 }
5610
5611 if (error)
5612 return (error);
5613
5614 /*
5615 * The evacuation succeeded. Remove any remaining MOS metadata
5616 * associated with this vdev, and wait for these changes to sync.
5617 */
c06d4368 5618 ASSERT0(vd->vdev_stat.vs_alloc);
5619 txg = spa_vdev_config_enter(spa);
5620 vd->vdev_removing = B_TRUE;
ea04106b 5621 vdev_dirty_leaves(vd, VDD_DTL, txg);
5622 vdev_config_dirty(vd);
5623 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5624
5625 return (0);
5626}
5627
5628 /*
5629 * Complete the removal by cleaning up the namespace.
5630 */
5631static void
5632spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5633{
5634 vdev_t *rvd = spa->spa_root_vdev;
5635 uint64_t id = vd->vdev_id;
5636 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5637
5638 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5639 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5640 ASSERT(vd == vd->vdev_top);
5641
5642 /*
5643 * Only remove devices that are empty.
5644 */
5645 if (vd->vdev_stat.vs_alloc != 0)
5646 return;
5647
5648 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5649
5650 if (list_link_active(&vd->vdev_state_dirty_node))
5651 vdev_state_clean(vd);
5652 if (list_link_active(&vd->vdev_config_dirty_node))
5653 vdev_config_clean(vd);
5654
5655 vdev_free(vd);
5656
5657 if (last_vdev) {
5658 vdev_compact_children(rvd);
5659 } else {
5660 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5661 vdev_add_child(rvd, vd);
5662 }
5663 vdev_config_dirty(rvd);
5664
5665 /*
5666 * Reassess the health of our root vdev.
5667 */
5668 vdev_reopen(rvd);
5669}
5670
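The last_vdev branch in spa_vdev_remove_from_namespace() exists to keep vdev ids stable: removing the final top-level vdev simply compacts the children, while removing a middle one substitutes a hole vdev so the ids of the remaining children do not shift. A toy illustration of the same idea, with a char array standing in for rvd->vdev_child (not spa.c code):

#include <stdio.h>

int
main(void)
{
	char children[] = { 'A', 'B', 'C', 'D' };
	unsigned c, nchildren = 4, id = 1;	/* remove 'B' */

	if (id == nchildren - 1)
		nchildren--;		/* last child: just shrink the array */
	else
		children[id] = '.';	/* middle child: leave a hole in its slot */

	for (c = 0; c < nchildren; c++)
		printf("%u:%c ", c, children[c]);
	printf("\n");			/* prints 0:A 1:. 2:C 3:D */
	return (0);
}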
5671 /*
5672 * Remove a device from the pool -
5673 *
5674 * Removing a device from the vdev namespace requires several steps
5675 * and can take a significant amount of time. As a result we use
5676 * the spa_vdev_config_[enter/exit] functions which allow us to
5677 * grab and release the spa_config_lock while still holding the namespace
5678 * lock. During each step the configuration is synced out.
5679 *
5680 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5681 * devices.
5682 */
5683int
5684spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5685{
5686 vdev_t *vd;
cae5b340 5687 sysevent_t *ev = NULL;
428870ff 5688 metaslab_group_t *mg;
b128c09f 5689 nvlist_t **spares, **l2cache, *nv;
fb5f0bc8 5690 uint64_t txg = 0;
428870ff 5691 uint_t nspares, nl2cache;
34dc7c2f 5692 int error = 0;
fb5f0bc8 5693 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
34dc7c2f 5694
5695 ASSERT(spa_writeable(spa));
5696
5697 if (!locked)
5698 txg = spa_vdev_enter(spa);
34dc7c2f 5699
b128c09f 5700 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5701
5702 if (spa->spa_spares.sav_vdevs != NULL &&
34dc7c2f 5703 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5704 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5705 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5706 /*
5707 * Only remove the hot spare if it's not currently in use
5708 * in this pool.
5709 */
5710 if (vd == NULL || unspare) {
5711 if (vd == NULL)
5712 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5713 ev = spa_event_create(spa, vd, NULL,
5714 ESC_ZFS_VDEV_REMOVE_AUX);
5715 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5716 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5717 spa_load_spares(spa);
5718 spa->spa_spares.sav_sync = B_TRUE;
5719 } else {
a08ee875 5720 error = SET_ERROR(EBUSY);
5721 }
5722 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
34dc7c2f 5723 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5724 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5725 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5726 /*
5727 * Cache devices can always be removed.
5728 */
5729 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
5730 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
5731 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5732 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5733 spa_load_l2cache(spa);
5734 spa->spa_l2cache.sav_sync = B_TRUE;
5735 } else if (vd != NULL && vd->vdev_islog) {
5736 ASSERT(!locked);
5737 ASSERT(vd == vd->vdev_top);
5738
5739 mg = vd->vdev_mg;
5740
5741 /*
5742 * Stop allocating from this vdev.
5743 */
5744 metaslab_group_passivate(mg);
5745
5746 /*
5747 * Wait for the youngest allocations and frees to sync,
5748 * and then wait for the deferral of those frees to finish.
5749 */
5750 spa_vdev_config_exit(spa, NULL,
5751 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5752
5753 /*
5754 * Attempt to evacuate the vdev.
5755 */
5756 error = spa_vdev_remove_evacuate(spa, vd);
5757
5758 txg = spa_vdev_config_enter(spa);
5759
5760 /*
5761 * If we couldn't evacuate the vdev, unwind.
5762 */
5763 if (error) {
5764 metaslab_group_activate(mg);
5765 return (spa_vdev_exit(spa, NULL, txg, error));
5766 }
5767
5768 /*
5769 * Clean up the vdev namespace.
5770 */
cae5b340 5771 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_DEV);
5772 spa_vdev_remove_from_namespace(spa, vd);
5773
5774 } else if (vd != NULL) {
5775 /*
5776 * Normal vdevs cannot be removed (yet).
5777 */
a08ee875 5778 error = SET_ERROR(ENOTSUP);
5779 } else {
5780 /*
5781 * There is no vdev of any kind with the specified guid.
5782 */
a08ee875 5783 error = SET_ERROR(ENOENT);
5784 }
5785
fb5f0bc8 5786 if (!locked)
5787 error = spa_vdev_exit(spa, NULL, txg, error);
5788
5789 if (ev)
5790 spa_event_post(ev);
5791
5792 return (error);
5793}
5794
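To summarize the case analysis in spa_vdev_remove() above: hot spares and cache devices can always be removed (a spare that is actively spared in returns EBUSY unless unspare is set), slogs are removable once evacuated, any other vdev returns ENOTSUP, and an unknown guid returns ENOENT. A reduced sketch of that dispatch, with the vdev kinds collapsed into an enum (illustrative only; evacuation errors are ignored here):

#include <errno.h>
#include <stdio.h>

enum vdev_kind { VK_NONE, VK_SPARE_IN_USE, VK_SPARE, VK_L2CACHE, VK_LOG, VK_NORMAL };

static int
remove_errno(enum vdev_kind kind)
{
	switch (kind) {
	case VK_SPARE:
	case VK_L2CACHE:
	case VK_LOG:
		return (0);		/* removable today */
	case VK_SPARE_IN_USE:
		return (EBUSY);		/* spare is in use and unspare not set */
	case VK_NORMAL:
		return (ENOTSUP);	/* normal vdevs cannot be removed (yet) */
	default:
		return (ENOENT);	/* no vdev of any kind with that guid */
	}
}

int
main(void)
{
	printf("%d %d %d\n", remove_errno(VK_LOG),
	    remove_errno(VK_NORMAL), remove_errno(VK_NONE));
	return (0);
}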
5795 /*
5796 * Find any device that's done replacing, or a vdev marked 'unspare' that's
a08ee875 5797 * currently spared, so we can detach it.
5798 */
5799static vdev_t *
5800spa_vdev_resilver_done_hunt(vdev_t *vd)
5801{
5802 vdev_t *newvd, *oldvd;
d6320ddb 5803 int c;
34dc7c2f 5804
d6320ddb 5805 for (c = 0; c < vd->vdev_children; c++) {
5806 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5807 if (oldvd != NULL)
5808 return (oldvd);
5809 }
5810
5811 /*
5812 * Check for a completed replacement. We always consider the first
5813 * vdev in the list to be the oldest vdev, and the last one to be
5814 * the newest (see spa_vdev_attach() for how that works). In
5815 * the case where the newest vdev is faulted, we will not automatically
5816 * remove it after a resilver completes. This is OK as it will require
5817 * user intervention to determine which disk the admin wishes to keep.
34dc7c2f 5818 */
5819 if (vd->vdev_ops == &vdev_replacing_ops) {
5820 ASSERT(vd->vdev_children > 1);
5821
5822 newvd = vd->vdev_child[vd->vdev_children - 1];
34dc7c2f 5823 oldvd = vd->vdev_child[0];
34dc7c2f 5824
fb5f0bc8 5825 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5826 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
fb5f0bc8 5827 !vdev_dtl_required(oldvd))
34dc7c2f 5828 return (oldvd);
5829 }
5830
5831 /*
5832 * Check for a completed resilver with the 'unspare' flag set.
5833 */
5834 if (vd->vdev_ops == &vdev_spare_ops) {
5835 vdev_t *first = vd->vdev_child[0];
5836 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5837
5838 if (last->vdev_unspare) {
5839 oldvd = first;
5840 newvd = last;
5841 } else if (first->vdev_unspare) {
5842 oldvd = last;
5843 newvd = first;
5844 } else {
5845 oldvd = NULL;
5846 }
34dc7c2f 5847
572e2857 5848 if (oldvd != NULL &&
fb5f0bc8 5849 vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5850 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
572e2857 5851 !vdev_dtl_required(oldvd))
34dc7c2f 5852 return (oldvd);
5853
5854 /*
5855 * If there are more than two spares attached to a disk,
5856 * and those spares are not required, then we want to
5857 * attempt to free them up now so that they can be used
5858 * by other pools. Once we're back down to a single
5859 * disk+spare, we stop removing them.
5860 */
5861 if (vd->vdev_children > 2) {
5862 newvd = vd->vdev_child[1];
5863
5864 if (newvd->vdev_isspare && last->vdev_isspare &&
5865 vdev_dtl_empty(last, DTL_MISSING) &&
5866 vdev_dtl_empty(last, DTL_OUTAGE) &&
5867 !vdev_dtl_required(newvd))
5868 return (newvd);
34dc7c2f 5869 }
5870 }
5871
5872 return (NULL);
5873}
5874
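For the spare case above, the old/new selection reduces to: the end child opposite the one carrying the 'unspare' flag is returned as the detach candidate. A toy reduction of just that choice (the struct is illustrative, not a spa.c type):

#include <stdio.h>

struct child { const char *name; int unspare; };

/* Return the end child to detach, or NULL if neither end is flagged. */
static const struct child *
pick_detach(const struct child *first, const struct child *last)
{
	if (last->unspare)
		return (first);
	if (first->unspare)
		return (last);
	return (NULL);
}

int
main(void)
{
	struct child orig = { "orig", 0 }, spare = { "spare", 1 };
	const struct child *oldvd = pick_detach(&orig, &spare);

	printf("%s\n", oldvd != NULL ? oldvd->name : "none");	/* prints "orig" */
	return (0);
}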
5875static void
5876spa_vdev_resilver_done(spa_t *spa)
5877{
5878 vdev_t *vd, *pvd, *ppvd;
5879 uint64_t guid, sguid, pguid, ppguid;
34dc7c2f 5880
fb5f0bc8 5881 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5882
5883 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5884 pvd = vd->vdev_parent;
5885 ppvd = pvd->vdev_parent;
34dc7c2f 5886 guid = vd->vdev_guid;
5887 pguid = pvd->vdev_guid;
5888 ppguid = ppvd->vdev_guid;
5889 sguid = 0;
5890 /*
5891 * If we have just finished replacing a hot spared device, then
5892 * we need to detach the parent's first child (the original hot
5893 * spare) as well.
5894 */
5895 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5896 ppvd->vdev_children == 2) {
34dc7c2f 5897 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
fb5f0bc8 5898 sguid = ppvd->vdev_child[1]->vdev_guid;
34dc7c2f 5899 }
5900 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5901
5902 spa_config_exit(spa, SCL_ALL, FTAG);
5903 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
34dc7c2f 5904 return;
fb5f0bc8 5905 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
34dc7c2f 5906 return;
fb5f0bc8 5907 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5908 }
5909
fb5f0bc8 5910 spa_config_exit(spa, SCL_ALL, FTAG);
5911}
5912
5913/*
428870ff 5914 * Update the stored path or FRU for this vdev.
5915 */
5916int
5917spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5918 boolean_t ispath)
34dc7c2f 5919{
b128c09f 5920 vdev_t *vd;
428870ff 5921 boolean_t sync = B_FALSE;
34dc7c2f 5922
5923 ASSERT(spa_writeable(spa));
5924
428870ff 5925 spa_vdev_state_enter(spa, SCL_ALL);
34dc7c2f 5926
9babb374 5927 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
428870ff 5928 return (spa_vdev_state_exit(spa, NULL, ENOENT));
5929
5930 if (!vd->vdev_ops->vdev_op_leaf)
428870ff 5931 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
34dc7c2f 5932
9babb374 5933 if (ispath) {
5934 if (strcmp(value, vd->vdev_path) != 0) {
5935 spa_strfree(vd->vdev_path);
5936 vd->vdev_path = spa_strdup(value);
5937 sync = B_TRUE;
5938 }
9babb374 5939 } else {
5940 if (vd->vdev_fru == NULL) {
5941 vd->vdev_fru = spa_strdup(value);
5942 sync = B_TRUE;
5943 } else if (strcmp(value, vd->vdev_fru) != 0) {
9babb374 5944 spa_strfree(vd->vdev_fru);
5945 vd->vdev_fru = spa_strdup(value);
5946 sync = B_TRUE;
5947 }
9babb374 5948 }
34dc7c2f 5949
428870ff 5950 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
5951}
5952
5953int
5954spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5955{
5956 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5957}
5958
5959int
5960spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5961{
5962 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5963}
5964
5965 /*
5966 * ==========================================================================
428870ff 5967 * SPA Scanning
5968 * ==========================================================================
5969 */
5970int
5971spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
5972{
5973 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5974
5975 if (dsl_scan_resilvering(spa->spa_dsl_pool))
5976 return (SET_ERROR(EBUSY));
5977
5978 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
5979}
34dc7c2f 5980
34dc7c2f 5981int
5982spa_scan_stop(spa_t *spa)
5983{
5984 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5985 if (dsl_scan_resilvering(spa->spa_dsl_pool))
a08ee875 5986 return (SET_ERROR(EBUSY));
5987 return (dsl_scan_cancel(spa->spa_dsl_pool));
5988}
5989
5990int
5991spa_scan(spa_t *spa, pool_scan_func_t func)
34dc7c2f 5992{
b128c09f 5993 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
34dc7c2f 5994
428870ff 5995 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
a08ee875 5996 return (SET_ERROR(ENOTSUP));
34dc7c2f 5997
34dc7c2f 5998 /*
5999 * If a resilver was requested, but there is no DTL on a
6000 * writeable leaf device, we have nothing to do.
34dc7c2f 6001 */
428870ff 6002 if (func == POOL_SCAN_RESILVER &&
6003 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6004 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
6005 return (0);
6006 }
6007
428870ff 6008 return (dsl_scan(spa->spa_dsl_pool, func));
6009}
6010
6011 /*
6012 * ==========================================================================
6013 * SPA async task processing
6014 * ==========================================================================
6015 */
6016
6017static void
6018spa_async_remove(spa_t *spa, vdev_t *vd)
6019{
6020 int c;
6021
b128c09f 6022 if (vd->vdev_remove_wanted) {
6023 vd->vdev_remove_wanted = B_FALSE;
6024 vd->vdev_delayed_close = B_FALSE;
b128c09f 6025 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
6026
6027 /*
6028 * We want to clear the stats, but we don't want to do a full
6029 * vdev_clear() as that will cause us to throw away
6030 * degraded/faulted state as well as attempt to reopen the
6031 * device, all of which is a waste.
6032 */
6033 vd->vdev_stat.vs_read_errors = 0;
6034 vd->vdev_stat.vs_write_errors = 0;
6035 vd->vdev_stat.vs_checksum_errors = 0;
6036
6037 vdev_state_dirty(vd->vdev_top);
6038 }
34dc7c2f 6039
d6320ddb 6040 for (c = 0; c < vd->vdev_children; c++)
6041 spa_async_remove(spa, vd->vdev_child[c]);
6042}
6043
6044static void
6045spa_async_probe(spa_t *spa, vdev_t *vd)
6046{
6047 int c;
6048
b128c09f 6049 if (vd->vdev_probe_wanted) {
428870ff 6050 vd->vdev_probe_wanted = B_FALSE;
b128c09f 6051 vdev_reopen(vd); /* vdev_open() does the actual probe */
34dc7c2f 6052 }
b128c09f 6053
d6320ddb 6054 for (c = 0; c < vd->vdev_children; c++)
b128c09f 6055 spa_async_probe(spa, vd->vdev_child[c]);
6056}
6057
6058static void
6059spa_async_autoexpand(spa_t *spa, vdev_t *vd)
6060{
d6320ddb 6061 int c;
6062
6063 if (!spa->spa_autoexpand)
6064 return;
6065
d6320ddb 6066 for (c = 0; c < vd->vdev_children; c++) {
6067 vdev_t *cvd = vd->vdev_child[c];
6068 spa_async_autoexpand(spa, cvd);
6069 }
6070
6071 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
6072 return;
6073
cae5b340 6074 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
6075}
6076
6077static void
6078spa_async_thread(spa_t *spa)
6079{
d6320ddb 6080 int tasks, i;
6081
6082 ASSERT(spa->spa_sync_on);
6083
6084 mutex_enter(&spa->spa_async_lock);
6085 tasks = spa->spa_async_tasks;
6086 spa->spa_async_tasks = 0;
6087 mutex_exit(&spa->spa_async_lock);
6088
6089 /*
6090 * See if the config needs to be updated.
6091 */
6092 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
428870ff 6093 uint64_t old_space, new_space;
9babb374 6094
34dc7c2f 6095 mutex_enter(&spa_namespace_lock);
428870ff 6096 old_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 6097 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
428870ff 6098 new_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 6099 mutex_exit(&spa_namespace_lock);
6100
6101 /*
6102 * If the pool grew as a result of the config update,
6103 * then log an internal history event.
6104 */
428870ff 6105 if (new_space != old_space) {
a08ee875 6106 spa_history_log_internal(spa, "vdev online", NULL,
45d1cae3 6107 "pool '%s' size: %llu(+%llu)",
428870ff 6108 spa_name(spa), new_space, new_space - old_space);
9babb374 6109 }
6110 }
6111
6112 /*
6113 * See if any devices need to be marked REMOVED.
34dc7c2f 6114 */
b128c09f 6115 if (tasks & SPA_ASYNC_REMOVE) {
428870ff 6116 spa_vdev_state_enter(spa, SCL_NONE);
34dc7c2f 6117 spa_async_remove(spa, spa->spa_root_vdev);
d6320ddb 6118 for (i = 0; i < spa->spa_l2cache.sav_count; i++)
b128c09f 6119 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
d6320ddb 6120 for (i = 0; i < spa->spa_spares.sav_count; i++)
6121 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
6122 (void) spa_vdev_state_exit(spa, NULL, 0);
6123 }
6124
6125 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
6126 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6127 spa_async_autoexpand(spa, spa->spa_root_vdev);
6128 spa_config_exit(spa, SCL_CONFIG, FTAG);
6129 }
6130
34dc7c2f 6131 /*
b128c09f 6132 * See if any devices need to be probed.
34dc7c2f 6133 */
b128c09f 6134 if (tasks & SPA_ASYNC_PROBE) {
428870ff 6135 spa_vdev_state_enter(spa, SCL_NONE);
6136 spa_async_probe(spa, spa->spa_root_vdev);
6137 (void) spa_vdev_state_exit(spa, NULL, 0);
6138 }
6139
6140 /*
b128c09f 6141 * If any devices are done replacing, detach them.
34dc7c2f 6142 */
6143 if (tasks & SPA_ASYNC_RESILVER_DONE)
6144 spa_vdev_resilver_done(spa);
6145
6146 /*
6147 * Kick off a resilver.
6148 */
b128c09f 6149 if (tasks & SPA_ASYNC_RESILVER)
428870ff 6150 dsl_resilver_restart(spa->spa_dsl_pool, 0);
6151
6152 /*
6153 * Let the world know that we're done.
6154 */
6155 mutex_enter(&spa->spa_async_lock);
6156 spa->spa_async_thread = NULL;
6157 cv_broadcast(&spa->spa_async_cv);
6158 mutex_exit(&spa->spa_async_lock);
6159 thread_exit();
6160}
6161
6162void
6163spa_async_suspend(spa_t *spa)
6164{
6165 mutex_enter(&spa->spa_async_lock);
6166 spa->spa_async_suspended++;
6167 while (spa->spa_async_thread != NULL)
6168 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
6169 mutex_exit(&spa->spa_async_lock);
6170}
6171
6172void
6173spa_async_resume(spa_t *spa)
6174{
6175 mutex_enter(&spa->spa_async_lock);
6176 ASSERT(spa->spa_async_suspended != 0);
6177 spa->spa_async_suspended--;
6178 mutex_exit(&spa->spa_async_lock);
6179}
6180
6181static boolean_t
6182spa_async_tasks_pending(spa_t *spa)
6183{
6184 uint_t non_config_tasks;
6185 uint_t config_task;
6186 boolean_t config_task_suspended;
6187
6188 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
6189 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
6190 if (spa->spa_ccw_fail_time == 0) {
6191 config_task_suspended = B_FALSE;
6192 } else {
6193 config_task_suspended =
6194 (gethrtime() - spa->spa_ccw_fail_time) <
6195 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
6196 }
6197
6198 return (non_config_tasks || (config_task && !config_task_suspended));
6199}
6200
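spa_async_tasks_pending() is where the retry interval for failed config cache-file writes (see the zfs_ccw_retry_interval comment near the top of this file) takes effect: a pending SPA_ASYNC_CONFIG_UPDATE is held back until that many seconds have passed since spa_ccw_fail_time. A standalone sketch of the gate, with plain integers standing in for hrtime_t and the spa fields (hypothetical harness):

#include <stdio.h>
#include <stdint.h>

#define	NANOSEC	1000000000LL

static int
config_task_suspended(int64_t now, int64_t ccw_fail_time, int64_t retry_interval_sec)
{
	if (ccw_fail_time == 0)
		return (0);	/* no recorded failure: never suspended */
	return ((now - ccw_fail_time) < retry_interval_sec * NANOSEC);
}

int
main(void)
{
	/* failed 3 seconds ago with a 5-second interval: still suspended */
	printf("%d\n", config_task_suspended(10LL * NANOSEC, 7LL * NANOSEC, 5));
	/* failed 7 seconds ago: eligible to retry */
	printf("%d\n", config_task_suspended(10LL * NANOSEC, 3LL * NANOSEC, 5));
	return (0);
}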
6201static void
6202spa_async_dispatch(spa_t *spa)
6203{
6204 mutex_enter(&spa->spa_async_lock);
6205 if (spa_async_tasks_pending(spa) &&
6206 !spa->spa_async_suspended &&
34dc7c2f 6207 spa->spa_async_thread == NULL &&
cae5b340 6208 rootdir != NULL)
6209 spa->spa_async_thread = thread_create(NULL, 0,
6210 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
6211 mutex_exit(&spa->spa_async_lock);
6212}
6213
6214void
6215spa_async_request(spa_t *spa, int task)
6216{
428870ff 6217 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
6218 mutex_enter(&spa->spa_async_lock);
6219 spa->spa_async_tasks |= task;
6220 mutex_exit(&spa->spa_async_lock);
6221}
6222
6223 /*
6224 * ==========================================================================
6225 * SPA syncing routines
6226 * ==========================================================================
6227 */
6228
6229static int
6230bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
34dc7c2f 6231{
6232 bpobj_t *bpo = arg;
6233 bpobj_enqueue(bpo, bp, tx);
6234 return (0);
6235}
34dc7c2f 6236
6237static int
6238spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6239{
6240 zio_t *zio = arg;
34dc7c2f 6241
6242 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
6243 zio->io_flags));
6244 return (0);
6245}
6246
6247/*
6248 * Note: this simple function is not inlined to make it easier to dtrace the
6249 * amount of time spent syncing frees.
6250 */
6251static void
6252spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
6253{
6254 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6255 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
6256 VERIFY(zio_wait(zio) == 0);
6257}
6258
6259/*
6260 * Note: this simple function is not inlined to make it easier to dtrace the
6261 * amount of time spent syncing deferred frees.
6262 */
6263static void
6264spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
6265{
6266 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6267 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
6268 spa_free_sync_cb, zio, tx), ==, 0);
6269 VERIFY0(zio_wait(zio));
6270}
6271
6272static void
6273spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
6274{
6275 char *packed = NULL;
b128c09f 6276 size_t bufsize;
6277 size_t nvsize = 0;
6278 dmu_buf_t *db;
6279
6280 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
6281
6282 /*
6283 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
ea04106b 6284 * information. This avoids the dmu_buf_will_dirty() path and
6285 * saves us a pre-read to get data we don't actually care about.
6286 */
9ae529ec 6287 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
ea04106b 6288 packed = vmem_alloc(bufsize, KM_SLEEP);
6289
6290 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
ea04106b 6291 KM_SLEEP) == 0);
b128c09f 6292 bzero(packed + nvsize, bufsize - nvsize);
34dc7c2f 6293
b128c09f 6294 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
34dc7c2f 6295
00b46022 6296 vmem_free(packed, bufsize);
6297
6298 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
6299 dmu_buf_will_dirty(db, tx);
6300 *(uint64_t *)db->db_data = nvsize;
6301 dmu_buf_rele(db, FTAG);
6302}
6303
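spa_sync_nvlist() rounds the packed nvlist up to whole SPA_CONFIG_BLOCKSIZE blocks so that dmu_write() always writes full blocks and never needs a pre-read. A tiny demonstration of the round-up arithmetic; P2ROUNDUP is the usual power-of-two macro, and the 16K block size here is only an assumption for the demo:

#include <stdio.h>
#include <stdint.h>

#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#define	DEMO_CONFIG_BLOCKSIZE	(1ULL << 14)	/* 16K, assumed for illustration */

int
main(void)
{
	uint64_t nvsize = 9000;
	uint64_t bufsize = P2ROUNDUP(nvsize, DEMO_CONFIG_BLOCKSIZE);

	/* 9000 bytes of packed nvlist are written as one 16384-byte block */
	printf("%llu\n", (unsigned long long)bufsize);
	return (0);
}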
6304static void
6305spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
6306 const char *config, const char *entry)
6307{
6308 nvlist_t *nvroot;
6309 nvlist_t **list;
6310 int i;
6311
6312 if (!sav->sav_sync)
6313 return;
6314
6315 /*
6316 * Update the MOS nvlist describing the list of available devices.
6317 * spa_validate_aux() will have already made sure this nvlist is
6318 * valid and the vdevs are labeled appropriately.
6319 */
6320 if (sav->sav_object == 0) {
6321 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
6322 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
6323 sizeof (uint64_t), tx);
6324 VERIFY(zap_update(spa->spa_meta_objset,
6325 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
6326 &sav->sav_object, tx) == 0);
6327 }
6328
ea04106b 6329 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6330 if (sav->sav_count == 0) {
6331 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
6332 } else {
ea04106b 6333 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
6334 for (i = 0; i < sav->sav_count; i++)
6335 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
428870ff 6336 B_FALSE, VDEV_CONFIG_L2CACHE);
6337 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6338 sav->sav_count) == 0);
6339 for (i = 0; i < sav->sav_count; i++)
6340 nvlist_free(list[i]);
6341 kmem_free(list, sav->sav_count * sizeof (void *));
6342 }
6343
6344 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6345 nvlist_free(nvroot);
6346
6347 sav->sav_sync = B_FALSE;
6348}
6349
6350 /*
6351 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
6352 * The all-vdev ZAP must be empty.
6353 */
6354static void
6355spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
6356{
6357 spa_t *spa = vd->vdev_spa;
6358 uint64_t i;
6359
6360 if (vd->vdev_top_zap != 0) {
6361 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6362 vd->vdev_top_zap, tx));
6363 }
6364 if (vd->vdev_leaf_zap != 0) {
6365 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6366 vd->vdev_leaf_zap, tx));
6367 }
6368 for (i = 0; i < vd->vdev_children; i++) {
6369 spa_avz_build(vd->vdev_child[i], avz, tx);
6370 }
6371}
6372
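spa_avz_build() above is a plain depth-first walk that records every non-zero top-level and leaf ZAP it encounters. The same shape in a self-contained toy, with a made-up struct and ZAP object numbers standing in for vdev_t and the MOS:

#include <stdio.h>

struct toy_vdev {
	unsigned long top_zap;
	unsigned long leaf_zap;
	struct toy_vdev *child[2];
};

static void
collect_zaps(const struct toy_vdev *vd)
{
	int i;

	if (vd == NULL)
		return;
	if (vd->top_zap != 0)
		printf("add %lu\n", vd->top_zap);	/* zap_add_int() in the real code */
	if (vd->leaf_zap != 0)
		printf("add %lu\n", vd->leaf_zap);
	for (i = 0; i < 2; i++)
		collect_zaps(vd->child[i]);
}

int
main(void)
{
	struct toy_vdev leaf0 = { 0, 101, { NULL, NULL } };
	struct toy_vdev leaf1 = { 0, 102, { NULL, NULL } };
	struct toy_vdev top = { 100, 0, { &leaf0, &leaf1 } };

	collect_zaps(&top);	/* prints 100, 101, 102 */
	return (0);
}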
6373static void
6374spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6375{
6376 nvlist_t *config;
6377
6378 /*
6379 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
6380 * its config may not be dirty but we still need to build per-vdev ZAPs.
6381 * Similarly, if the pool is being assembled (e.g. after a split), we
6382 * need to rebuild the AVZ although the config may not be dirty.
6383 */
6384 if (list_is_empty(&spa->spa_config_dirty_list) &&
6385 spa->spa_avz_action == AVZ_ACTION_NONE)
6386 return;
6387
6388 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6389
6390 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
6391 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
6392 spa->spa_all_vdev_zaps != 0);
6393
6394 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
6395 zap_cursor_t zc;
6396 zap_attribute_t za;
6397
6398 /* Make and build the new AVZ */
6399 uint64_t new_avz = zap_create(spa->spa_meta_objset,
6400 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
6401 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
6402
6403 /* Diff old AVZ with new one */
6404 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6405 spa->spa_all_vdev_zaps);
6406 zap_cursor_retrieve(&zc, &za) == 0;
6407 zap_cursor_advance(&zc)) {
6408 uint64_t vdzap = za.za_first_integer;
6409 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
6410 vdzap) == ENOENT) {
6411 /*
6412 * ZAP is listed in old AVZ but not in new one;
6413 * destroy it
6414 */
6415 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
6416 tx));
6417 }
6418 }
6419
6420 zap_cursor_fini(&zc);
6421
6422 /* Destroy the old AVZ */
6423 VERIFY0(zap_destroy(spa->spa_meta_objset,
6424 spa->spa_all_vdev_zaps, tx));
6425
6426 /* Replace the old AVZ in the dir obj with the new one */
6427 VERIFY0(zap_update(spa->spa_meta_objset,
6428 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
6429 sizeof (new_avz), 1, &new_avz, tx));
6430
6431 spa->spa_all_vdev_zaps = new_avz;
6432 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
6433 zap_cursor_t zc;
6434 zap_attribute_t za;
6435
6436 /* Walk through the AVZ and destroy all listed ZAPs */
6437 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6438 spa->spa_all_vdev_zaps);
6439 zap_cursor_retrieve(&zc, &za) == 0;
6440 zap_cursor_advance(&zc)) {
6441 uint64_t zap = za.za_first_integer;
6442 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
6443 }
6444
6445 zap_cursor_fini(&zc);
6446
6447 /* Destroy and unlink the AVZ itself */
6448 VERIFY0(zap_destroy(spa->spa_meta_objset,
6449 spa->spa_all_vdev_zaps, tx));
6450 VERIFY0(zap_remove(spa->spa_meta_objset,
6451 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
6452 spa->spa_all_vdev_zaps = 0;
6453 }
6454
6455 if (spa->spa_all_vdev_zaps == 0) {
6456 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
6457 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
6458 DMU_POOL_VDEV_ZAP_MAP, tx);
6459 }
6460 spa->spa_avz_action = AVZ_ACTION_NONE;
6461
6462 /* Create ZAPs for vdevs that don't have them. */
6463 vdev_construct_zaps(spa->spa_root_vdev, tx);
6464
6465 config = spa_config_generate(spa, spa->spa_root_vdev,
6466 dmu_tx_get_txg(tx), B_FALSE);
6467
6468 /*
6469 * If we're upgrading the spa version then make sure that
6470 * the config object gets updated with the correct version.
6471 */
6472 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
6473 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6474 spa->spa_uberblock.ub_version);
6475
b128c09f 6476 spa_config_exit(spa, SCL_STATE, FTAG);
34dc7c2f 6477
cae5b340 6478 nvlist_free(spa->spa_config_syncing);
6479 spa->spa_config_syncing = config;
6480
6481 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
6482}
6483
9ae529ec 6484static void
a08ee875 6485spa_sync_version(void *arg, dmu_tx_t *tx)
9ae529ec 6486{
6487 uint64_t *versionp = arg;
6488 uint64_t version = *versionp;
6489 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
6490
6491 /*
6492 * Setting the version is special cased when first creating the pool.
6493 */
6494 ASSERT(tx->tx_txg != TXG_INITIAL);
6495
8dca0a9a 6496 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6497 ASSERT(version >= spa_version(spa));
6498
6499 spa->spa_uberblock.ub_version = version;
6500 vdev_config_dirty(spa->spa_root_vdev);
a08ee875 6501 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
6502}
6503
6504/*
6505 * Set zpool properties.
6506 */
6507static void
a08ee875 6508spa_sync_props(void *arg, dmu_tx_t *tx)
34dc7c2f 6509{
6510 nvlist_t *nvp = arg;
6511 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
34dc7c2f 6512 objset_t *mos = spa->spa_meta_objset;
9ae529ec 6513 nvpair_t *elem = NULL;
6514
6515 mutex_enter(&spa->spa_props_lock);
34dc7c2f 6516
34dc7c2f 6517 while ((elem = nvlist_next_nvpair(nvp, elem))) {
6518 uint64_t intval;
6519 char *strval, *fname;
6520 zpool_prop_t prop;
6521 const char *propname;
6522 zprop_type_t proptype;
ea04106b 6523 spa_feature_t fid;
6524
6525 prop = zpool_name_to_prop(nvpair_name(elem));
6526 switch ((int)prop) {
6527 case ZPROP_INVAL:
6528 /*
6529 * We checked this earlier in spa_prop_validate().
6530 */
6531 ASSERT(zpool_prop_feature(nvpair_name(elem)));
6532
6533 fname = strchr(nvpair_name(elem), '@') + 1;
ea04106b 6534 VERIFY0(zfeature_lookup_name(fname, &fid));
9ae529ec 6535
ea04106b 6536 spa_feature_enable(spa, fid, tx);
6537 spa_history_log_internal(spa, "set", tx,
6538 "%s=enabled", nvpair_name(elem));
6539 break;
6540
34dc7c2f 6541 case ZPOOL_PROP_VERSION:
ea04106b 6542 intval = fnvpair_value_uint64(elem);
34dc7c2f 6543 /*
cae5b340 6544 * The version is synced separately before other
9ae529ec 6545 * properties and should be correct by now.
34dc7c2f 6546 */
9ae529ec 6547 ASSERT3U(spa_version(spa), >=, intval);
6548 break;
6549
6550 case ZPOOL_PROP_ALTROOT:
6551 /*
6552 * 'altroot' is a non-persistent property. It should
6553 * have been set temporarily at creation or import time.
6554 */
6555 ASSERT(spa->spa_root != NULL);
6556 break;
6557
572e2857 6558 case ZPOOL_PROP_READONLY:
6559 case ZPOOL_PROP_CACHEFILE:
6560 /*
6561 * 'readonly' and 'cachefile' are also non-persistent
6562 * properties.
34dc7c2f 6563 */
34dc7c2f 6564 break;
d96eb2b1 6565 case ZPOOL_PROP_COMMENT:
ea04106b 6566 strval = fnvpair_value_string(elem);
6567 if (spa->spa_comment != NULL)
6568 spa_strfree(spa->spa_comment);
6569 spa->spa_comment = spa_strdup(strval);
6570 /*
6571 * We need to dirty the configuration on all the vdevs
6572 * so that their labels get updated. It's unnecessary
6573 * to do this for pool creation since the vdev's
cae5b340 6574 * configuration has already been dirtied.
6575 */
6576 if (tx->tx_txg != TXG_INITIAL)
6577 vdev_config_dirty(spa->spa_root_vdev);
6578 spa_history_log_internal(spa, "set", tx,
6579 "%s=%s", nvpair_name(elem), strval);
d96eb2b1 6580 break;
6581 default:
6582 /*
6583 * Set pool property values in the poolprops mos object.
6584 */
34dc7c2f 6585 if (spa->spa_pool_props_object == 0) {
6586 spa->spa_pool_props_object =
6587 zap_create_link(mos, DMU_OT_POOL_PROPS,
34dc7c2f 6588 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
9ae529ec 6589 tx);
34dc7c2f 6590 }
6591
6592 /* normalize the property name */
6593 propname = zpool_prop_to_name(prop);
6594 proptype = zpool_prop_get_type(prop);
6595
6596 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6597 ASSERT(proptype == PROP_TYPE_STRING);
6598 strval = fnvpair_value_string(elem);
6599 VERIFY0(zap_update(mos,
34dc7c2f 6600 spa->spa_pool_props_object, propname,
ea04106b 6601 1, strlen(strval) + 1, strval, tx));
6602 spa_history_log_internal(spa, "set", tx,
6603 "%s=%s", nvpair_name(elem), strval);
34dc7c2f 6604 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
ea04106b 6605 intval = fnvpair_value_uint64(elem);
6606
6607 if (proptype == PROP_TYPE_INDEX) {
6608 const char *unused;
6609 VERIFY0(zpool_prop_index_to_string(
6610 prop, intval, &unused));
34dc7c2f 6611 }
ea04106b 6612 VERIFY0(zap_update(mos,
34dc7c2f 6613 spa->spa_pool_props_object, propname,
ea04106b 6614 8, 1, &intval, tx));
6615 spa_history_log_internal(spa, "set", tx,
6616 "%s=%lld", nvpair_name(elem), intval);
6617 } else {
6618 ASSERT(0); /* not allowed */
6619 }
6620
6621 switch (prop) {
6622 case ZPOOL_PROP_DELEGATION:
6623 spa->spa_delegation = intval;
6624 break;
6625 case ZPOOL_PROP_BOOTFS:
6626 spa->spa_bootfs = intval;
6627 break;
6628 case ZPOOL_PROP_FAILUREMODE:
6629 spa->spa_failmode = intval;
6630 break;
6631 case ZPOOL_PROP_AUTOEXPAND:
6632 spa->spa_autoexpand = intval;
6633 if (tx->tx_txg != TXG_INITIAL)
6634 spa_async_request(spa,
6635 SPA_ASYNC_AUTOEXPAND);
6636 break;
6637 case ZPOOL_PROP_MULTIHOST:
6638 spa->spa_multihost = intval;
6639 break;
6640 case ZPOOL_PROP_DEDUPDITTO:
6641 spa->spa_dedup_ditto = intval;
9babb374 6642 break;
6643 default:
6644 break;
6645 }
6646 }
6647
34dc7c2f 6648 }
6649
6650 mutex_exit(&spa->spa_props_lock);
6651}
6652
6653 /*
6654 * Perform one-time upgrade on-disk changes. spa_version() does not
6655 * reflect the new version this txg, so there must be no changes this
6656 * txg to anything that the upgrade code depends on after it executes.
6657 * Therefore this must be called after dsl_pool_sync() does the sync
6658 * tasks.
6659 */
6660static void
6661spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6662{
6663 dsl_pool_t *dp = spa->spa_dsl_pool;
6664
6665 ASSERT(spa->spa_sync_pass == 1);
6666
6667 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6668
6669 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6670 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6671 dsl_pool_create_origin(dp, tx);
6672
6673 /* Keeping the origin open increases spa_minref */
6674 spa->spa_minref += 3;
6675 }
6676
6677 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6678 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6679 dsl_pool_upgrade_clones(dp, tx);
6680 }
6681
6682 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6683 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6684 dsl_pool_upgrade_dir_clones(dp, tx);
6685
6686 /* Keeping the freedir open increases spa_minref */
6687 spa->spa_minref += 3;
6688 }
6689
6690 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6691 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6692 spa_feature_create_zap_objects(spa, tx);
6693 }
6694
6695 /*
6696 * LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
6697 * when possibility to use lz4 compression for metadata was added
6698 * Old pools that have this feature enabled must be upgraded to have
6699 * this feature active
6700 */
6701 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6702 boolean_t lz4_en = spa_feature_is_enabled(spa,
6703 SPA_FEATURE_LZ4_COMPRESS);
6704 boolean_t lz4_ac = spa_feature_is_active(spa,
6705 SPA_FEATURE_LZ4_COMPRESS);
6706
6707 if (lz4_en && !lz4_ac)
6708 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6709 }
6710
6711 /*
6712 * If we haven't written the salt, do so now. Note that the
6713 * feature may not be activated yet, but that's fine since
6714 * the presence of this ZAP entry is backwards compatible.
6715 */
6716 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
6717 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
6718 VERIFY0(zap_add(spa->spa_meta_objset,
6719 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
6720 sizeof (spa->spa_cksum_salt.zcs_bytes),
6721 spa->spa_cksum_salt.zcs_bytes, tx));
6722 }
6723
a08ee875 6724 rrw_exit(&dp->dp_config_rwlock, FTAG);
6725}
6726
6727 /*
6728 * Sync the specified transaction group. New blocks may be dirtied as
6729 * part of the process, so we iterate until it converges.
6730 */
6731void
6732spa_sync(spa_t *spa, uint64_t txg)
6733{
6734 dsl_pool_t *dp = spa->spa_dsl_pool;
6735 objset_t *mos = spa->spa_meta_objset;
428870ff 6736 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
cae5b340 6737 metaslab_class_t *mc;
6738 vdev_t *rvd = spa->spa_root_vdev;
6739 vdev_t *vd;
34dc7c2f 6740 dmu_tx_t *tx;
b128c09f 6741 int error;
6742 uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
6743 zfs_vdev_queue_depth_pct / 100;
6744 uint64_t queue_depth_total;
d6320ddb 6745 int c;
34dc7c2f 6746
6747 VERIFY(spa_writeable(spa));
6748
6749 /*
6750 * Lock out configuration changes.
6751 */
b128c09f 6752 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6753
6754 spa->spa_syncing_txg = txg;
6755 spa->spa_sync_pass = 0;
6756
6757 mutex_enter(&spa->spa_alloc_lock);
6758 VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
6759 mutex_exit(&spa->spa_alloc_lock);
6760
6761 /*
6762 * If there are any pending vdev state changes, convert them
6763 * into config changes that go out with this transaction group.
6764 */
6765 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6766 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6767 /*
6768 * We need the write lock here because, for aux vdevs,
6769 * calling vdev_config_dirty() modifies sav_config.
6770 * This is ugly and will become unnecessary when we
6771 * eliminate the aux vdev wart by integrating all vdevs
6772 * into the root vdev tree.
6773 */
6774 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6775 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6776 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6777 vdev_state_clean(vd);
6778 vdev_config_dirty(vd);
6779 }
6780 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6781 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6782 }
6783 spa_config_exit(spa, SCL_STATE, FTAG);
6784
6785 tx = dmu_tx_create_assigned(dp, txg);
6786
c06d4368 6787 spa->spa_sync_starttime = gethrtime();
6788 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
6789 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
ea04106b 6790 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
6791 NSEC_TO_TICK(spa->spa_deadman_synctime));
6792
6793 /*
6794 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6795 * set spa_deflate if we have no raid-z vdevs.
6796 */
6797 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6798 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6799 int i;
6800
6801 for (i = 0; i < rvd->vdev_children; i++) {
6802 vd = rvd->vdev_child[i];
6803 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6804 break;
6805 }
6806 if (i == rvd->vdev_children) {
6807 spa->spa_deflate = TRUE;
6808 VERIFY(0 == zap_add(spa->spa_meta_objset,
6809 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6810 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6811 }
6812 }
6813
6814 /*
6815 * Set the top-level vdev's max queue depth. Evaluate each
6816 * top-level's async write queue depth in case it changed.
6817 * The max queue depth will not change in the middle of syncing
6818 * out this txg.
6819 */
6820 queue_depth_total = 0;
6821 for (c = 0; c < rvd->vdev_children; c++) {
6822 vdev_t *tvd = rvd->vdev_child[c];
6823 metaslab_group_t *mg = tvd->vdev_mg;
6824
6825 if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
6826 !metaslab_group_initialized(mg))
6827 continue;
6828
6829 /*
6830 * It is safe to do a lock-free check here because only async
6831 * allocations look at mg_max_alloc_queue_depth, and async
6832 * allocations all happen from spa_sync().
6833 */
6834 ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
6835 mg->mg_max_alloc_queue_depth = max_queue_depth;
6836 queue_depth_total += mg->mg_max_alloc_queue_depth;
6837 }
6838 mc = spa_normal_class(spa);
6839 ASSERT0(refcount_count(&mc->mc_alloc_slots));
6840 mc->mc_alloc_max_slots = queue_depth_total;
6841 mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
6842
6843 ASSERT3U(mc->mc_alloc_max_slots, <=,
6844 max_queue_depth * rvd->vdev_children);
6845
6846 /*
6847 * Iterate to convergence.
6848 */
6849 do {
428870ff 6850 int pass = ++spa->spa_sync_pass;
6851
6852 spa_sync_config_object(spa, tx);
6853 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6854 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6855 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6856 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6857 spa_errlog_sync(spa, txg);
6858 dsl_pool_sync(dp, txg);
6859
c06d4368 6860 if (pass < zfs_sync_pass_deferred_free) {
a08ee875 6861 spa_sync_frees(spa, free_bpl, tx);
428870ff 6862 } else {
e10b0808
AX
6863 /*
6864 * We can not defer frees in pass 1, because
6865 * we sync the deferred frees later in pass 1.
6866 */
6867 ASSERT3U(pass, >, 1);
428870ff 6868 bplist_iterate(free_bpl, bpobj_enqueue_cb,
a08ee875 6869 &spa->spa_deferred_bpobj, tx);
34dc7c2f
BB
6870 }
6871
428870ff
BB
6872 ddt_sync(spa, txg);
6873 dsl_scan_sync(dp, tx);
34dc7c2f 6874
c65aa5b2 6875 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
428870ff
BB
6876 vdev_sync(vd, txg);
6877
e10b0808 6878 if (pass == 1) {
428870ff 6879 spa_sync_upgrades(spa, tx);
e10b0808
AX
6880 ASSERT3U(txg, >=,
6881 spa->spa_uberblock.ub_rootbp.blk_birth);
6882 /*
6883 * Note: We need to check if the MOS is dirty
6884 * because we could have marked the MOS dirty
6885 * without updating the uberblock (e.g. if we
6886 * have sync tasks but no dirty user data). We
6887 * need to check the uberblock's rootbp because
6888 * it is updated if we have synced out dirty
6889 * data (though in this case the MOS will most
6890 * likely also be dirty due to second order
6891 * effects, we don't want to rely on that here).
6892 */
6893 if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
6894 !dmu_objset_is_dirty(mos, txg)) {
6895 /*
6896 * Nothing changed on the first pass,
6897 * therefore this TXG is a no-op. Avoid
6898 * syncing deferred frees, so that we
6899 * can keep this TXG as a no-op.
6900 */
6901 ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
6902 txg));
6903 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6904 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
6905 break;
6906 }
6907 spa_sync_deferred_frees(spa, tx);
6908 }
34dc7c2f 6909
428870ff 6910 } while (dmu_objset_is_dirty(mos, txg));

#ifdef ZFS_DEBUG
	if (!list_is_empty(&spa->spa_config_dirty_list)) {
		/*
		 * Make sure that the number of ZAPs for all the vdevs matches
		 * the number of ZAPs in the per-vdev ZAP list. This only gets
		 * checked if the config is dirty; otherwise there may be
		 * outstanding AVZ operations that weren't completed in
		 * spa_sync_config_object.
		 */
		uint64_t all_vdev_zap_entry_count;
		ASSERT0(zap_count(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
		    all_vdev_zap_entry_count);
	}
#endif

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for a complete description).
	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
	 */
	for (;;) {
		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_DVAS_PER_BP];
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);

			for (c = 0; c < children; c++) {
				vd = rvd->vdev_child[(c0 + c) % children];
				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
					continue;
				svd[svdcount++] = vd;
				if (svdcount == SPA_DVAS_PER_BP)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg);
		}

		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
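		/*
		 * The label write failed. Suspend the pool and wait until
		 * the failure is cleared and the pool is resumed (e.g. via
		 * "zpool clear") before retrying.
		 */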
		zio_suspend(spa, NULL);
		zio_resume_wait(spa);
	}

	dmu_tx_commit(tx);

	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = 0;

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	dsl_pool_sync_done(dp, txg);

	mutex_enter(&spa->spa_alloc_lock);
	VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
	mutex_exit(&spa->spa_alloc_lock);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
		vdev_sync_done(vd, txg);

	spa_update_dspace(spa);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));

	spa->spa_sync_pass = 0;

	/*
	 * Update the last synced uberblock here. We want to do this at
	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
	 * will be guaranteed that all the processing associated with
	 * that txg has been completed.
	 */
	spa->spa_ubsync = spa->spa_uberblock;
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	spa_handle_ignored_writes(spa);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

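/*
 * Illustrative sketch (not code from this file): a consumer that needs a
 * txg's state to be on disk typically waits on spa_sync() via
 *
 *	txg_wait_synced(spa_get_dsl(spa), txg);
 *	ASSERT3U(spa_last_synced_txg(spa), >=, txg);
 *
 * which is safe precisely because spa_ubsync is updated only at the very
 * end of spa_sync(), as noted above.
 */
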
/*
 * Sync all pools. We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state. All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks. The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
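
/*
 * Illustrative caller (cf. zfs_ioc_pool_upgrade): after validating that the
 * requested version is supported, upgrading a pool to the newest on-disk
 * version is roughly
 *
 *	spa_upgrade(spa, SPA_VERSION);
 */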

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2, once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

static sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
	sysevent_t *ev = NULL;
#ifdef _KERNEL
	nvlist_t *resource;

	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
	if (resource) {
		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
		ev->resource = resource;
	}
#endif
	return (ev);
}

static void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
	if (ev) {
		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
		kmem_free(ev, sizeof (*ev));
	}
#endif
}

/*
 * Post a zevent corresponding to the given sysevent. The 'name' must be one
 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be
 * filled in from the spa and (optionally) the vdev. This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
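
/*
 * Illustrative usage (not code from this file): posting one of the
 * ESC_ZFS_* event names from sys/sysevent/eventdefs.h, e.g. when a
 * resilver starts:
 *
 *	spa_event_notify(spa, NULL, NULL, ESC_ZFS_RESILVER_START);
 */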

#if defined(_KERNEL) && defined(HAVE_SPL)
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);

/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);

/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);

/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);

/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);

/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);

/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);

/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
#endif

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(spa_load_verify_maxinflight, int, 0644);
MODULE_PARM_DESC(spa_load_verify_maxinflight,
	"Max concurrent traversal I/Os while verifying pool during import -X");

module_param(spa_load_verify_metadata, int, 0644);
MODULE_PARM_DESC(spa_load_verify_metadata,
	"Set to traverse metadata on pool import");

module_param(spa_load_verify_data, int, 0644);
MODULE_PARM_DESC(spa_load_verify_data,
	"Set to traverse data on pool import");

/* CSTYLED */
module_param(zio_taskq_batch_pct, uint, 0444);
MODULE_PARM_DESC(zio_taskq_batch_pct,
	"Percentage of CPUs to run an IO worker thread");
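
/*
 * Illustrative (assuming zfs is built as a loadable module): these
 * parameters appear under /sys/module/zfs/parameters/, and the writable
 * ones can be changed at runtime, e.g.
 *
 *	echo 0 > /sys/module/zfs/parameters/spa_load_verify_metadata
 */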

#endif