]> git.proxmox.com Git - mirror_zfs.git/blame - module/zfs/spa.c
Create an 'overlay' property
[mirror_zfs.git] / module / zfs / spa.c
CommitLineData
34dc7c2f
BB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
428870ff 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
2e528b49 24 * Copyright (c) 2013 by Delphix. All rights reserved.
7011fb60 25 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
a38718a6 26 */
34dc7c2f 27
34dc7c2f 28/*
e49f1e20
WA
29 * SPA: Storage Pool Allocator
30 *
34dc7c2f
BB
31 * This file contains all the routines used when modifying on-disk SPA state.
32 * This includes opening, importing, destroying, exporting a pool, and syncing a
33 * pool.
34 */
35
36#include <sys/zfs_context.h>
37#include <sys/fm/fs/zfs.h>
38#include <sys/spa_impl.h>
39#include <sys/zio.h>
40#include <sys/zio_checksum.h>
34dc7c2f
BB
41#include <sys/dmu.h>
42#include <sys/dmu_tx.h>
43#include <sys/zap.h>
44#include <sys/zil.h>
428870ff 45#include <sys/ddt.h>
34dc7c2f 46#include <sys/vdev_impl.h>
c28b2279 47#include <sys/vdev_disk.h>
34dc7c2f 48#include <sys/metaslab.h>
428870ff 49#include <sys/metaslab_impl.h>
34dc7c2f
BB
50#include <sys/uberblock_impl.h>
51#include <sys/txg.h>
52#include <sys/avl.h>
53#include <sys/dmu_traverse.h>
54#include <sys/dmu_objset.h>
55#include <sys/unique.h>
56#include <sys/dsl_pool.h>
57#include <sys/dsl_dataset.h>
58#include <sys/dsl_dir.h>
59#include <sys/dsl_prop.h>
60#include <sys/dsl_synctask.h>
61#include <sys/fs/zfs.h>
62#include <sys/arc.h>
63#include <sys/callb.h>
64#include <sys/systeminfo.h>
34dc7c2f 65#include <sys/spa_boot.h>
9babb374 66#include <sys/zfs_ioctl.h>
428870ff 67#include <sys/dsl_scan.h>
9ae529ec 68#include <sys/zfeature.h>
13fe0198 69#include <sys/dsl_destroy.h>
526af785 70#include <sys/zvol.h>
34dc7c2f 71
d164b209 72#ifdef _KERNEL
428870ff
BB
73#include <sys/bootprops.h>
74#include <sys/callb.h>
75#include <sys/cpupart.h>
76#include <sys/pool.h>
77#include <sys/sysdc.h>
d164b209
BB
78#include <sys/zone.h>
79#endif /* _KERNEL */
80
34dc7c2f
BB
81#include "zfs_prop.h"
82#include "zfs_comutil.h"
83
428870ff 84typedef enum zti_modes {
7ef5e54e 85 ZTI_MODE_FIXED, /* value is # of threads (min 1) */
7ef5e54e
AL
86 ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
87 ZTI_MODE_NULL, /* don't create a taskq */
88 ZTI_NMODES
428870ff 89} zti_modes_t;
34dc7c2f 90
7ef5e54e
AL
91#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
92#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
93#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
94#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
9babb374 95
7ef5e54e
AL
96#define ZTI_N(n) ZTI_P(n, 1)
97#define ZTI_ONE ZTI_N(1)
9babb374
BB
98
99typedef struct zio_taskq_info {
7ef5e54e 100 zti_modes_t zti_mode;
428870ff 101 uint_t zti_value;
7ef5e54e 102 uint_t zti_count;
9babb374
BB
103} zio_taskq_info_t;
104
105static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
451041db 106 "iss", "iss_h", "int", "int_h"
9babb374
BB
107};
108
428870ff 109/*
7ef5e54e
AL
110 * This table defines the taskq settings for each ZFS I/O type. When
111 * initializing a pool, we use this table to create an appropriately sized
112 * taskq. Some operations are low volume and therefore have a small, static
113 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
114 * macros. Other operations process a large amount of data; the ZTI_BATCH
115 * macro causes us to create a taskq oriented for throughput. Some operations
116 * are so high frequency and short-lived that the taskq itself can become a a
117 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
118 * additional degree of parallelism specified by the number of threads per-
119 * taskq and the number of taskqs; when dispatching an event in this case, the
120 * particular taskq is chosen at random.
121 *
122 * The different taskq priorities are to handle the different contexts (issue
123 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
124 * need to be handled with minimum delay.
428870ff
BB
125 */
126const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
127 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
7ef5e54e
AL
128 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
129 { ZTI_N(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL }, /* READ */
130 { ZTI_BATCH, ZTI_N(5), ZTI_N(16), ZTI_N(5) }, /* WRITE */
131 { ZTI_P(4, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
132 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
133 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
9babb374
BB
134};
135
13fe0198
MA
136static void spa_sync_version(void *arg, dmu_tx_t *tx);
137static void spa_sync_props(void *arg, dmu_tx_t *tx);
b128c09f 138static boolean_t spa_has_active_shared_spare(spa_t *spa);
bf701a83 139static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
428870ff
BB
140 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
141 char **ereport);
572e2857 142static void spa_vdev_resilver_done(spa_t *spa);
428870ff 143
e8b96c60 144uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
428870ff
BB
145id_t zio_taskq_psrset_bind = PS_NONE;
146boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
147uint_t zio_taskq_basedc = 80; /* base duty cycle */
148
149boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
150
151/*
152 * This (illegal) pool name is used when temporarily importing a spa_t in order
153 * to get the vdev stats associated with the imported devices.
154 */
155#define TRYIMPORT_NAME "$import"
34dc7c2f
BB
156
157/*
158 * ==========================================================================
159 * SPA properties routines
160 * ==========================================================================
161 */
162
163/*
164 * Add a (source=src, propname=propval) list to an nvlist.
165 */
166static void
167spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
168 uint64_t intval, zprop_source_t src)
169{
170 const char *propname = zpool_prop_to_name(prop);
171 nvlist_t *propval;
172
b8d06fca 173 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
174 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
175
176 if (strval != NULL)
177 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
178 else
179 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
180
181 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
182 nvlist_free(propval);
183}
184
185/*
186 * Get property values from the spa configuration.
187 */
188static void
189spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
190{
1bd201e7 191 vdev_t *rvd = spa->spa_root_vdev;
9ae529ec 192 dsl_pool_t *pool = spa->spa_dsl_pool;
d164b209 193 uint64_t size;
428870ff 194 uint64_t alloc;
1bd201e7 195 uint64_t space;
34dc7c2f
BB
196 uint64_t cap, version;
197 zprop_source_t src = ZPROP_SRC_NONE;
b128c09f 198 spa_config_dirent_t *dp;
1bd201e7 199 int c;
b128c09f
BB
200
201 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
34dc7c2f 202
1bd201e7 203 if (rvd != NULL) {
428870ff
BB
204 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
205 size = metaslab_class_get_space(spa_normal_class(spa));
d164b209
BB
206 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
207 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
428870ff
BB
208 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
209 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
210 size - alloc, src);
1bd201e7
CS
211
212 space = 0;
213 for (c = 0; c < rvd->vdev_children; c++) {
214 vdev_t *tvd = rvd->vdev_child[c];
215 space += tvd->vdev_max_asize - tvd->vdev_asize;
216 }
217 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
218 src);
219
572e2857
BB
220 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
221 (spa_mode(spa) == FREAD), src);
d164b209 222
428870ff 223 cap = (size == 0) ? 0 : (alloc * 100 / size);
d164b209
BB
224 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
225
428870ff
BB
226 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
227 ddt_get_pool_dedup_ratio(spa), src);
228
d164b209 229 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
1bd201e7 230 rvd->vdev_state, src);
d164b209
BB
231
232 version = spa_version(spa);
233 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
234 src = ZPROP_SRC_DEFAULT;
235 else
236 src = ZPROP_SRC_LOCAL;
237 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
238 }
34dc7c2f 239
9ae529ec 240 if (pool != NULL) {
9ae529ec
CS
241 /*
242 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS,
243 * when opening pools before this version freedir will be NULL.
244 */
fbeddd60 245 if (pool->dp_free_dir != NULL) {
9ae529ec 246 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
fbeddd60 247 pool->dp_free_dir->dd_phys->dd_used_bytes, src);
9ae529ec
CS
248 } else {
249 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
250 NULL, 0, src);
251 }
fbeddd60
MA
252
253 if (pool->dp_leak_dir != NULL) {
254 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
255 pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
256 } else {
257 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
258 NULL, 0, src);
259 }
9ae529ec
CS
260 }
261
34dc7c2f 262 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
34dc7c2f 263
d96eb2b1
DM
264 if (spa->spa_comment != NULL) {
265 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
266 0, ZPROP_SRC_LOCAL);
267 }
268
34dc7c2f
BB
269 if (spa->spa_root != NULL)
270 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
271 0, ZPROP_SRC_LOCAL);
272
b128c09f
BB
273 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
274 if (dp->scd_path == NULL) {
34dc7c2f 275 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
b128c09f
BB
276 "none", 0, ZPROP_SRC_LOCAL);
277 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
34dc7c2f 278 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
b128c09f 279 dp->scd_path, 0, ZPROP_SRC_LOCAL);
34dc7c2f
BB
280 }
281 }
282}
283
284/*
285 * Get zpool property values.
286 */
287int
288spa_prop_get(spa_t *spa, nvlist_t **nvp)
289{
428870ff 290 objset_t *mos = spa->spa_meta_objset;
34dc7c2f
BB
291 zap_cursor_t zc;
292 zap_attribute_t za;
34dc7c2f
BB
293 int err;
294
b8d06fca 295 err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE);
c28b2279 296 if (err)
d1d7e268 297 return (err);
34dc7c2f 298
b128c09f
BB
299 mutex_enter(&spa->spa_props_lock);
300
34dc7c2f
BB
301 /*
302 * Get properties from the spa config.
303 */
304 spa_prop_get_config(spa, nvp);
305
34dc7c2f 306 /* If no pool property object, no more prop to get. */
428870ff 307 if (mos == NULL || spa->spa_pool_props_object == 0) {
34dc7c2f 308 mutex_exit(&spa->spa_props_lock);
c28b2279 309 goto out;
34dc7c2f
BB
310 }
311
312 /*
313 * Get properties from the MOS pool property object.
314 */
315 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
316 (err = zap_cursor_retrieve(&zc, &za)) == 0;
317 zap_cursor_advance(&zc)) {
318 uint64_t intval = 0;
319 char *strval = NULL;
320 zprop_source_t src = ZPROP_SRC_DEFAULT;
321 zpool_prop_t prop;
322
323 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
324 continue;
325
326 switch (za.za_integer_length) {
327 case 8:
328 /* integer property */
329 if (za.za_first_integer !=
330 zpool_prop_default_numeric(prop))
331 src = ZPROP_SRC_LOCAL;
332
333 if (prop == ZPOOL_PROP_BOOTFS) {
334 dsl_pool_t *dp;
335 dsl_dataset_t *ds = NULL;
336
337 dp = spa_get_dsl(spa);
13fe0198 338 dsl_pool_config_enter(dp, FTAG);
c65aa5b2
BB
339 if ((err = dsl_dataset_hold_obj(dp,
340 za.za_first_integer, FTAG, &ds))) {
13fe0198 341 dsl_pool_config_exit(dp, FTAG);
34dc7c2f
BB
342 break;
343 }
344
345 strval = kmem_alloc(
346 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
b8d06fca 347 KM_PUSHPAGE);
34dc7c2f 348 dsl_dataset_name(ds, strval);
b128c09f 349 dsl_dataset_rele(ds, FTAG);
13fe0198 350 dsl_pool_config_exit(dp, FTAG);
34dc7c2f
BB
351 } else {
352 strval = NULL;
353 intval = za.za_first_integer;
354 }
355
356 spa_prop_add_list(*nvp, prop, strval, intval, src);
357
358 if (strval != NULL)
359 kmem_free(strval,
360 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
361
362 break;
363
364 case 1:
365 /* string property */
b8d06fca 366 strval = kmem_alloc(za.za_num_integers, KM_PUSHPAGE);
34dc7c2f
BB
367 err = zap_lookup(mos, spa->spa_pool_props_object,
368 za.za_name, 1, za.za_num_integers, strval);
369 if (err) {
370 kmem_free(strval, za.za_num_integers);
371 break;
372 }
373 spa_prop_add_list(*nvp, prop, strval, 0, src);
374 kmem_free(strval, za.za_num_integers);
375 break;
376
377 default:
378 break;
379 }
380 }
381 zap_cursor_fini(&zc);
382 mutex_exit(&spa->spa_props_lock);
383out:
384 if (err && err != ENOENT) {
385 nvlist_free(*nvp);
386 *nvp = NULL;
387 return (err);
388 }
389
390 return (0);
391}
392
393/*
394 * Validate the given pool properties nvlist and modify the list
395 * for the property values to be set.
396 */
397static int
398spa_prop_validate(spa_t *spa, nvlist_t *props)
399{
400 nvpair_t *elem;
401 int error = 0, reset_bootfs = 0;
d4ed6673 402 uint64_t objnum = 0;
9ae529ec 403 boolean_t has_feature = B_FALSE;
34dc7c2f
BB
404
405 elem = NULL;
406 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
34dc7c2f 407 uint64_t intval;
9ae529ec
CS
408 char *strval, *slash, *check, *fname;
409 const char *propname = nvpair_name(elem);
410 zpool_prop_t prop = zpool_name_to_prop(propname);
411
412 switch ((int)prop) {
413 case ZPROP_INVAL:
414 if (!zpool_prop_feature(propname)) {
2e528b49 415 error = SET_ERROR(EINVAL);
9ae529ec
CS
416 break;
417 }
418
419 /*
420 * Sanitize the input.
421 */
422 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
2e528b49 423 error = SET_ERROR(EINVAL);
9ae529ec
CS
424 break;
425 }
426
427 if (nvpair_value_uint64(elem, &intval) != 0) {
2e528b49 428 error = SET_ERROR(EINVAL);
9ae529ec
CS
429 break;
430 }
34dc7c2f 431
9ae529ec 432 if (intval != 0) {
2e528b49 433 error = SET_ERROR(EINVAL);
9ae529ec
CS
434 break;
435 }
34dc7c2f 436
9ae529ec
CS
437 fname = strchr(propname, '@') + 1;
438 if (zfeature_lookup_name(fname, NULL) != 0) {
2e528b49 439 error = SET_ERROR(EINVAL);
9ae529ec
CS
440 break;
441 }
442
443 has_feature = B_TRUE;
444 break;
34dc7c2f 445
34dc7c2f
BB
446 case ZPOOL_PROP_VERSION:
447 error = nvpair_value_uint64(elem, &intval);
448 if (!error &&
9ae529ec
CS
449 (intval < spa_version(spa) ||
450 intval > SPA_VERSION_BEFORE_FEATURES ||
451 has_feature))
2e528b49 452 error = SET_ERROR(EINVAL);
34dc7c2f
BB
453 break;
454
455 case ZPOOL_PROP_DELEGATION:
456 case ZPOOL_PROP_AUTOREPLACE:
b128c09f 457 case ZPOOL_PROP_LISTSNAPS:
9babb374 458 case ZPOOL_PROP_AUTOEXPAND:
34dc7c2f
BB
459 error = nvpair_value_uint64(elem, &intval);
460 if (!error && intval > 1)
2e528b49 461 error = SET_ERROR(EINVAL);
34dc7c2f
BB
462 break;
463
464 case ZPOOL_PROP_BOOTFS:
9babb374
BB
465 /*
466 * If the pool version is less than SPA_VERSION_BOOTFS,
467 * or the pool is still being created (version == 0),
468 * the bootfs property cannot be set.
469 */
34dc7c2f 470 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
2e528b49 471 error = SET_ERROR(ENOTSUP);
34dc7c2f
BB
472 break;
473 }
474
475 /*
b128c09f 476 * Make sure the vdev config is bootable
34dc7c2f 477 */
b128c09f 478 if (!vdev_is_bootable(spa->spa_root_vdev)) {
2e528b49 479 error = SET_ERROR(ENOTSUP);
34dc7c2f
BB
480 break;
481 }
482
483 reset_bootfs = 1;
484
485 error = nvpair_value_string(elem, &strval);
486
487 if (!error) {
9ae529ec 488 objset_t *os;
b128c09f
BB
489 uint64_t compress;
490
34dc7c2f
BB
491 if (strval == NULL || strval[0] == '\0') {
492 objnum = zpool_prop_default_numeric(
493 ZPOOL_PROP_BOOTFS);
494 break;
495 }
496
d1d7e268
MK
497 error = dmu_objset_hold(strval, FTAG, &os);
498 if (error)
34dc7c2f 499 break;
b128c09f 500
428870ff
BB
501 /* Must be ZPL and not gzip compressed. */
502
503 if (dmu_objset_type(os) != DMU_OST_ZFS) {
2e528b49 504 error = SET_ERROR(ENOTSUP);
13fe0198
MA
505 } else if ((error =
506 dsl_prop_get_int_ds(dmu_objset_ds(os),
b128c09f 507 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
13fe0198 508 &compress)) == 0 &&
b128c09f 509 !BOOTFS_COMPRESS_VALID(compress)) {
2e528b49 510 error = SET_ERROR(ENOTSUP);
b128c09f
BB
511 } else {
512 objnum = dmu_objset_id(os);
513 }
428870ff 514 dmu_objset_rele(os, FTAG);
34dc7c2f
BB
515 }
516 break;
b128c09f 517
34dc7c2f
BB
518 case ZPOOL_PROP_FAILUREMODE:
519 error = nvpair_value_uint64(elem, &intval);
520 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
521 intval > ZIO_FAILURE_MODE_PANIC))
2e528b49 522 error = SET_ERROR(EINVAL);
34dc7c2f
BB
523
524 /*
525 * This is a special case which only occurs when
526 * the pool has completely failed. This allows
527 * the user to change the in-core failmode property
528 * without syncing it out to disk (I/Os might
529 * currently be blocked). We do this by returning
530 * EIO to the caller (spa_prop_set) to trick it
531 * into thinking we encountered a property validation
532 * error.
533 */
b128c09f 534 if (!error && spa_suspended(spa)) {
34dc7c2f 535 spa->spa_failmode = intval;
2e528b49 536 error = SET_ERROR(EIO);
34dc7c2f
BB
537 }
538 break;
539
540 case ZPOOL_PROP_CACHEFILE:
541 if ((error = nvpair_value_string(elem, &strval)) != 0)
542 break;
543
544 if (strval[0] == '\0')
545 break;
546
547 if (strcmp(strval, "none") == 0)
548 break;
549
550 if (strval[0] != '/') {
2e528b49 551 error = SET_ERROR(EINVAL);
34dc7c2f
BB
552 break;
553 }
554
555 slash = strrchr(strval, '/');
556 ASSERT(slash != NULL);
557
558 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
559 strcmp(slash, "/..") == 0)
2e528b49 560 error = SET_ERROR(EINVAL);
34dc7c2f 561 break;
428870ff 562
d96eb2b1
DM
563 case ZPOOL_PROP_COMMENT:
564 if ((error = nvpair_value_string(elem, &strval)) != 0)
565 break;
566 for (check = strval; *check != '\0'; check++) {
567 if (!isprint(*check)) {
2e528b49 568 error = SET_ERROR(EINVAL);
d96eb2b1
DM
569 break;
570 }
571 check++;
572 }
573 if (strlen(strval) > ZPROP_MAX_COMMENT)
2e528b49 574 error = SET_ERROR(E2BIG);
d96eb2b1
DM
575 break;
576
428870ff
BB
577 case ZPOOL_PROP_DEDUPDITTO:
578 if (spa_version(spa) < SPA_VERSION_DEDUP)
2e528b49 579 error = SET_ERROR(ENOTSUP);
428870ff
BB
580 else
581 error = nvpair_value_uint64(elem, &intval);
582 if (error == 0 &&
583 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
2e528b49 584 error = SET_ERROR(EINVAL);
428870ff 585 break;
e75c13c3
BB
586
587 default:
588 break;
34dc7c2f
BB
589 }
590
591 if (error)
592 break;
593 }
594
595 if (!error && reset_bootfs) {
596 error = nvlist_remove(props,
597 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
598
599 if (!error) {
600 error = nvlist_add_uint64(props,
601 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
602 }
603 }
604
605 return (error);
606}
607
d164b209
BB
608void
609spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
610{
611 char *cachefile;
612 spa_config_dirent_t *dp;
613
614 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
615 &cachefile) != 0)
616 return;
617
618 dp = kmem_alloc(sizeof (spa_config_dirent_t),
b8d06fca 619 KM_PUSHPAGE);
d164b209
BB
620
621 if (cachefile[0] == '\0')
622 dp->scd_path = spa_strdup(spa_config_path);
623 else if (strcmp(cachefile, "none") == 0)
624 dp->scd_path = NULL;
625 else
626 dp->scd_path = spa_strdup(cachefile);
627
628 list_insert_head(&spa->spa_config_list, dp);
629 if (need_sync)
630 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
631}
632
34dc7c2f
BB
633int
634spa_prop_set(spa_t *spa, nvlist_t *nvp)
635{
636 int error;
9ae529ec 637 nvpair_t *elem = NULL;
d164b209 638 boolean_t need_sync = B_FALSE;
34dc7c2f
BB
639
640 if ((error = spa_prop_validate(spa, nvp)) != 0)
641 return (error);
642
d164b209 643 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
9ae529ec 644 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
d164b209 645
572e2857
BB
646 if (prop == ZPOOL_PROP_CACHEFILE ||
647 prop == ZPOOL_PROP_ALTROOT ||
648 prop == ZPOOL_PROP_READONLY)
d164b209
BB
649 continue;
650
9ae529ec
CS
651 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
652 uint64_t ver;
653
654 if (prop == ZPOOL_PROP_VERSION) {
655 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
656 } else {
657 ASSERT(zpool_prop_feature(nvpair_name(elem)));
658 ver = SPA_VERSION_FEATURES;
659 need_sync = B_TRUE;
660 }
661
662 /* Save time if the version is already set. */
663 if (ver == spa_version(spa))
664 continue;
665
666 /*
667 * In addition to the pool directory object, we might
668 * create the pool properties object, the features for
669 * read object, the features for write object, or the
670 * feature descriptions object.
671 */
13fe0198
MA
672 error = dsl_sync_task(spa->spa_name, NULL,
673 spa_sync_version, &ver, 6);
9ae529ec
CS
674 if (error)
675 return (error);
676 continue;
677 }
678
d164b209
BB
679 need_sync = B_TRUE;
680 break;
681 }
682
9ae529ec 683 if (need_sync) {
13fe0198
MA
684 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
685 nvp, 6));
9ae529ec
CS
686 }
687
688 return (0);
34dc7c2f
BB
689}
690
691/*
692 * If the bootfs property value is dsobj, clear it.
693 */
694void
695spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
696{
697 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
698 VERIFY(zap_remove(spa->spa_meta_objset,
699 spa->spa_pool_props_object,
700 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
701 spa->spa_bootfs = 0;
702 }
703}
704
3bc7e0fb
GW
705/*ARGSUSED*/
706static int
13fe0198 707spa_change_guid_check(void *arg, dmu_tx_t *tx)
3bc7e0fb 708{
13fe0198 709 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
3bc7e0fb
GW
710 vdev_t *rvd = spa->spa_root_vdev;
711 uint64_t vdev_state;
13fe0198 712 ASSERTV(uint64_t *newguid = arg);
3bc7e0fb
GW
713
714 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
715 vdev_state = rvd->vdev_state;
716 spa_config_exit(spa, SCL_STATE, FTAG);
717
718 if (vdev_state != VDEV_STATE_HEALTHY)
2e528b49 719 return (SET_ERROR(ENXIO));
3bc7e0fb
GW
720
721 ASSERT3U(spa_guid(spa), !=, *newguid);
722
723 return (0);
724}
725
726static void
13fe0198 727spa_change_guid_sync(void *arg, dmu_tx_t *tx)
3bc7e0fb 728{
13fe0198
MA
729 uint64_t *newguid = arg;
730 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
3bc7e0fb
GW
731 uint64_t oldguid;
732 vdev_t *rvd = spa->spa_root_vdev;
733
734 oldguid = spa_guid(spa);
735
736 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
737 rvd->vdev_guid = *newguid;
738 rvd->vdev_guid_sum += (*newguid - oldguid);
739 vdev_config_dirty(rvd);
740 spa_config_exit(spa, SCL_STATE, FTAG);
741
6f1ffb06
MA
742 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
743 oldguid, *newguid);
3bc7e0fb
GW
744}
745
3541dc6d
GA
746/*
747 * Change the GUID for the pool. This is done so that we can later
748 * re-import a pool built from a clone of our own vdevs. We will modify
749 * the root vdev's guid, our own pool guid, and then mark all of our
750 * vdevs dirty. Note that we must make sure that all our vdevs are
751 * online when we do this, or else any vdevs that weren't present
752 * would be orphaned from our pool. We are also going to issue a
753 * sysevent to update any watchers.
754 */
755int
756spa_change_guid(spa_t *spa)
757{
3bc7e0fb
GW
758 int error;
759 uint64_t guid;
3541dc6d 760
621dd7bb 761 mutex_enter(&spa->spa_vdev_top_lock);
3bc7e0fb
GW
762 mutex_enter(&spa_namespace_lock);
763 guid = spa_generate_guid(NULL);
3541dc6d 764
13fe0198
MA
765 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
766 spa_change_guid_sync, &guid, 5);
3541dc6d 767
3bc7e0fb
GW
768 if (error == 0) {
769 spa_config_sync(spa, B_FALSE, B_TRUE);
770 spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_REGUID);
771 }
3541dc6d 772
3bc7e0fb 773 mutex_exit(&spa_namespace_lock);
621dd7bb 774 mutex_exit(&spa->spa_vdev_top_lock);
3541dc6d 775
3bc7e0fb 776 return (error);
3541dc6d
GA
777}
778
34dc7c2f
BB
779/*
780 * ==========================================================================
781 * SPA state manipulation (open/create/destroy/import/export)
782 * ==========================================================================
783 */
784
785static int
786spa_error_entry_compare(const void *a, const void *b)
787{
788 spa_error_entry_t *sa = (spa_error_entry_t *)a;
789 spa_error_entry_t *sb = (spa_error_entry_t *)b;
790 int ret;
791
792 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
5dbd68a3 793 sizeof (zbookmark_phys_t));
34dc7c2f
BB
794
795 if (ret < 0)
796 return (-1);
797 else if (ret > 0)
798 return (1);
799 else
800 return (0);
801}
802
803/*
804 * Utility function which retrieves copies of the current logs and
805 * re-initializes them in the process.
806 */
807void
808spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
809{
810 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
811
812 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
813 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
814
815 avl_create(&spa->spa_errlist_scrub,
816 spa_error_entry_compare, sizeof (spa_error_entry_t),
817 offsetof(spa_error_entry_t, se_avl));
818 avl_create(&spa->spa_errlist_last,
819 spa_error_entry_compare, sizeof (spa_error_entry_t),
820 offsetof(spa_error_entry_t, se_avl));
821}
822
7ef5e54e
AL
823static void
824spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
34dc7c2f 825{
7ef5e54e
AL
826 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
827 enum zti_modes mode = ztip->zti_mode;
828 uint_t value = ztip->zti_value;
829 uint_t count = ztip->zti_count;
830 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
831 char name[32];
832 uint_t i, flags = 0;
428870ff 833 boolean_t batch = B_FALSE;
34dc7c2f 834
7ef5e54e
AL
835 if (mode == ZTI_MODE_NULL) {
836 tqs->stqs_count = 0;
837 tqs->stqs_taskq = NULL;
838 return;
839 }
428870ff 840
7ef5e54e 841 ASSERT3U(count, >, 0);
428870ff 842
7ef5e54e
AL
843 tqs->stqs_count = count;
844 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
428870ff 845
e8b96c60
MA
846 switch (mode) {
847 case ZTI_MODE_FIXED:
848 ASSERT3U(value, >=, 1);
849 value = MAX(value, 1);
850 break;
7ef5e54e 851
e8b96c60
MA
852 case ZTI_MODE_BATCH:
853 batch = B_TRUE;
854 flags |= TASKQ_THREADS_CPU_PCT;
855 value = zio_taskq_batch_pct;
856 break;
7ef5e54e 857
e8b96c60
MA
858 default:
859 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
860 "spa_activate()",
861 zio_type_name[t], zio_taskq_types[q], mode, value);
862 break;
863 }
7ef5e54e 864
e8b96c60
MA
865 for (i = 0; i < count; i++) {
866 taskq_t *tq;
7ef5e54e
AL
867
868 if (count > 1) {
869 (void) snprintf(name, sizeof (name), "%s_%s_%u",
870 zio_type_name[t], zio_taskq_types[q], i);
871 } else {
872 (void) snprintf(name, sizeof (name), "%s_%s",
873 zio_type_name[t], zio_taskq_types[q]);
874 }
875
876 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
877 if (batch)
878 flags |= TASKQ_DC_BATCH;
879
880 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
881 spa->spa_proc, zio_taskq_basedc, flags);
882 } else {
e8b96c60
MA
883 pri_t pri = maxclsyspri;
884 /*
885 * The write issue taskq can be extremely CPU
886 * intensive. Run it at slightly lower priority
887 * than the other taskqs.
888 */
889 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
890 pri--;
891
892 tq = taskq_create_proc(name, value, pri, 50,
7ef5e54e
AL
893 INT_MAX, spa->spa_proc, flags);
894 }
895
896 tqs->stqs_taskq[i] = tq;
897 }
898}
899
900static void
901spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
902{
903 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
904 uint_t i;
905
906 if (tqs->stqs_taskq == NULL) {
907 ASSERT3U(tqs->stqs_count, ==, 0);
908 return;
909 }
910
911 for (i = 0; i < tqs->stqs_count; i++) {
912 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
913 taskq_destroy(tqs->stqs_taskq[i]);
428870ff 914 }
34dc7c2f 915
7ef5e54e
AL
916 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
917 tqs->stqs_taskq = NULL;
918}
34dc7c2f 919
7ef5e54e
AL
920/*
921 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
922 * Note that a type may have multiple discrete taskqs to avoid lock contention
923 * on the taskq itself. In that case we choose which taskq at random by using
924 * the low bits of gethrtime().
925 */
926void
927spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
928 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
929{
930 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
931 taskq_t *tq;
932
933 ASSERT3P(tqs->stqs_taskq, !=, NULL);
934 ASSERT3U(tqs->stqs_count, !=, 0);
935
936 if (tqs->stqs_count == 1) {
937 tq = tqs->stqs_taskq[0];
938 } else {
c12936b1 939 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
428870ff 940 }
7ef5e54e
AL
941
942 taskq_dispatch_ent(tq, func, arg, flags, ent);
428870ff
BB
943}
944
044baf00
BB
945/*
946 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
947 */
948void
949spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
950 task_func_t *func, void *arg, uint_t flags)
951{
952 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
953 taskq_t *tq;
954 taskqid_t id;
955
956 ASSERT3P(tqs->stqs_taskq, !=, NULL);
957 ASSERT3U(tqs->stqs_count, !=, 0);
958
959 if (tqs->stqs_count == 1) {
960 tq = tqs->stqs_taskq[0];
961 } else {
c12936b1 962 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
044baf00
BB
963 }
964
965 id = taskq_dispatch(tq, func, arg, flags);
966 if (id)
967 taskq_wait_id(tq, id);
968}
969
428870ff
BB
970static void
971spa_create_zio_taskqs(spa_t *spa)
972{
d6320ddb
BB
973 int t, q;
974
975 for (t = 0; t < ZIO_TYPES; t++) {
976 for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
7ef5e54e 977 spa_taskqs_init(spa, t, q);
428870ff
BB
978 }
979 }
980}
9babb374 981
7b89a549 982#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
428870ff
BB
983static void
984spa_thread(void *arg)
985{
986 callb_cpr_t cprinfo;
9babb374 987
428870ff
BB
988 spa_t *spa = arg;
989 user_t *pu = PTOU(curproc);
9babb374 990
428870ff
BB
991 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
992 spa->spa_name);
9babb374 993
428870ff
BB
994 ASSERT(curproc != &p0);
995 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
996 "zpool-%s", spa->spa_name);
997 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
998
999 /* bind this thread to the requested psrset */
1000 if (zio_taskq_psrset_bind != PS_NONE) {
1001 pool_lock();
1002 mutex_enter(&cpu_lock);
1003 mutex_enter(&pidlock);
1004 mutex_enter(&curproc->p_lock);
1005
1006 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1007 0, NULL, NULL) == 0) {
1008 curthread->t_bind_pset = zio_taskq_psrset_bind;
1009 } else {
1010 cmn_err(CE_WARN,
1011 "Couldn't bind process for zfs pool \"%s\" to "
1012 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1013 }
1014
1015 mutex_exit(&curproc->p_lock);
1016 mutex_exit(&pidlock);
1017 mutex_exit(&cpu_lock);
1018 pool_unlock();
1019 }
1020
1021 if (zio_taskq_sysdc) {
1022 sysdc_thread_enter(curthread, 100, 0);
1023 }
1024
1025 spa->spa_proc = curproc;
1026 spa->spa_did = curthread->t_did;
1027
1028 spa_create_zio_taskqs(spa);
1029
1030 mutex_enter(&spa->spa_proc_lock);
1031 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1032
1033 spa->spa_proc_state = SPA_PROC_ACTIVE;
1034 cv_broadcast(&spa->spa_proc_cv);
1035
1036 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1037 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1038 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1039 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1040
1041 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1042 spa->spa_proc_state = SPA_PROC_GONE;
1043 spa->spa_proc = &p0;
1044 cv_broadcast(&spa->spa_proc_cv);
1045 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1046
1047 mutex_enter(&curproc->p_lock);
1048 lwp_exit();
1049}
1050#endif
1051
1052/*
1053 * Activate an uninitialized pool.
1054 */
1055static void
1056spa_activate(spa_t *spa, int mode)
1057{
1058 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1059
1060 spa->spa_state = POOL_STATE_ACTIVE;
1061 spa->spa_mode = mode;
1062
1063 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1064 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1065
1066 /* Try to create a covering process */
1067 mutex_enter(&spa->spa_proc_lock);
1068 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1069 ASSERT(spa->spa_proc == &p0);
1070 spa->spa_did = 0;
1071
7b89a549 1072#ifdef HAVE_SPA_THREAD
428870ff
BB
1073 /* Only create a process if we're going to be around a while. */
1074 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1075 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1076 NULL, 0) == 0) {
1077 spa->spa_proc_state = SPA_PROC_CREATED;
1078 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1079 cv_wait(&spa->spa_proc_cv,
1080 &spa->spa_proc_lock);
9babb374 1081 }
428870ff
BB
1082 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1083 ASSERT(spa->spa_proc != &p0);
1084 ASSERT(spa->spa_did != 0);
1085 } else {
1086#ifdef _KERNEL
1087 cmn_err(CE_WARN,
1088 "Couldn't create process for zfs pool \"%s\"\n",
1089 spa->spa_name);
1090#endif
b128c09f 1091 }
34dc7c2f 1092 }
7b89a549 1093#endif /* HAVE_SPA_THREAD */
428870ff
BB
1094 mutex_exit(&spa->spa_proc_lock);
1095
1096 /* If we didn't create a process, we need to create our taskqs. */
1097 if (spa->spa_proc == &p0) {
1098 spa_create_zio_taskqs(spa);
1099 }
34dc7c2f 1100
b128c09f
BB
1101 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1102 offsetof(vdev_t, vdev_config_dirty_node));
1103 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1104 offsetof(vdev_t, vdev_state_dirty_node));
34dc7c2f
BB
1105
1106 txg_list_create(&spa->spa_vdev_txg_list,
1107 offsetof(struct vdev, vdev_txg_node));
1108
1109 avl_create(&spa->spa_errlist_scrub,
1110 spa_error_entry_compare, sizeof (spa_error_entry_t),
1111 offsetof(spa_error_entry_t, se_avl));
1112 avl_create(&spa->spa_errlist_last,
1113 spa_error_entry_compare, sizeof (spa_error_entry_t),
1114 offsetof(spa_error_entry_t, se_avl));
1115}
1116
1117/*
1118 * Opposite of spa_activate().
1119 */
1120static void
1121spa_deactivate(spa_t *spa)
1122{
d6320ddb
BB
1123 int t, q;
1124
34dc7c2f
BB
1125 ASSERT(spa->spa_sync_on == B_FALSE);
1126 ASSERT(spa->spa_dsl_pool == NULL);
1127 ASSERT(spa->spa_root_vdev == NULL);
9babb374 1128 ASSERT(spa->spa_async_zio_root == NULL);
34dc7c2f
BB
1129 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1130
1131 txg_list_destroy(&spa->spa_vdev_txg_list);
1132
b128c09f
BB
1133 list_destroy(&spa->spa_config_dirty_list);
1134 list_destroy(&spa->spa_state_dirty_list);
34dc7c2f 1135
cc92e9d0
GW
1136 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
1137
d6320ddb
BB
1138 for (t = 0; t < ZIO_TYPES; t++) {
1139 for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
7ef5e54e 1140 spa_taskqs_fini(spa, t, q);
b128c09f 1141 }
34dc7c2f
BB
1142 }
1143
1144 metaslab_class_destroy(spa->spa_normal_class);
1145 spa->spa_normal_class = NULL;
1146
1147 metaslab_class_destroy(spa->spa_log_class);
1148 spa->spa_log_class = NULL;
1149
1150 /*
1151 * If this was part of an import or the open otherwise failed, we may
1152 * still have errors left in the queues. Empty them just in case.
1153 */
1154 spa_errlog_drain(spa);
1155
1156 avl_destroy(&spa->spa_errlist_scrub);
1157 avl_destroy(&spa->spa_errlist_last);
1158
1159 spa->spa_state = POOL_STATE_UNINITIALIZED;
428870ff
BB
1160
1161 mutex_enter(&spa->spa_proc_lock);
1162 if (spa->spa_proc_state != SPA_PROC_NONE) {
1163 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1164 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1165 cv_broadcast(&spa->spa_proc_cv);
1166 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1167 ASSERT(spa->spa_proc != &p0);
1168 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1169 }
1170 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1171 spa->spa_proc_state = SPA_PROC_NONE;
1172 }
1173 ASSERT(spa->spa_proc == &p0);
1174 mutex_exit(&spa->spa_proc_lock);
1175
1176 /*
1177 * We want to make sure spa_thread() has actually exited the ZFS
1178 * module, so that the module can't be unloaded out from underneath
1179 * it.
1180 */
1181 if (spa->spa_did != 0) {
1182 thread_join(spa->spa_did);
1183 spa->spa_did = 0;
1184 }
34dc7c2f
BB
1185}
1186
1187/*
1188 * Verify a pool configuration, and construct the vdev tree appropriately. This
1189 * will create all the necessary vdevs in the appropriate layout, with each vdev
1190 * in the CLOSED state. This will prep the pool before open/creation/import.
1191 * All vdev validation is done by the vdev_alloc() routine.
1192 */
1193static int
1194spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1195 uint_t id, int atype)
1196{
1197 nvlist_t **child;
9babb374 1198 uint_t children;
34dc7c2f 1199 int error;
d6320ddb 1200 int c;
34dc7c2f
BB
1201
1202 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1203 return (error);
1204
1205 if ((*vdp)->vdev_ops->vdev_op_leaf)
1206 return (0);
1207
b128c09f
BB
1208 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1209 &child, &children);
1210
1211 if (error == ENOENT)
1212 return (0);
1213
1214 if (error) {
34dc7c2f
BB
1215 vdev_free(*vdp);
1216 *vdp = NULL;
2e528b49 1217 return (SET_ERROR(EINVAL));
34dc7c2f
BB
1218 }
1219
d6320ddb 1220 for (c = 0; c < children; c++) {
34dc7c2f
BB
1221 vdev_t *vd;
1222 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1223 atype)) != 0) {
1224 vdev_free(*vdp);
1225 *vdp = NULL;
1226 return (error);
1227 }
1228 }
1229
1230 ASSERT(*vdp != NULL);
1231
1232 return (0);
1233}
1234
1235/*
1236 * Opposite of spa_load().
1237 */
1238static void
1239spa_unload(spa_t *spa)
1240{
1241 int i;
1242
b128c09f
BB
1243 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1244
34dc7c2f
BB
1245 /*
1246 * Stop async tasks.
1247 */
1248 spa_async_suspend(spa);
1249
1250 /*
1251 * Stop syncing.
1252 */
1253 if (spa->spa_sync_on) {
1254 txg_sync_stop(spa->spa_dsl_pool);
1255 spa->spa_sync_on = B_FALSE;
1256 }
1257
1258 /*
b128c09f 1259 * Wait for any outstanding async I/O to complete.
34dc7c2f 1260 */
9babb374
BB
1261 if (spa->spa_async_zio_root != NULL) {
1262 (void) zio_wait(spa->spa_async_zio_root);
1263 spa->spa_async_zio_root = NULL;
1264 }
34dc7c2f 1265
428870ff
BB
1266 bpobj_close(&spa->spa_deferred_bpobj);
1267
93cf2076
GW
1268 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1269
1270 /*
1271 * Close all vdevs.
1272 */
1273 if (spa->spa_root_vdev)
1274 vdev_free(spa->spa_root_vdev);
1275 ASSERT(spa->spa_root_vdev == NULL);
1276
34dc7c2f
BB
1277 /*
1278 * Close the dsl pool.
1279 */
1280 if (spa->spa_dsl_pool) {
1281 dsl_pool_close(spa->spa_dsl_pool);
1282 spa->spa_dsl_pool = NULL;
428870ff 1283 spa->spa_meta_objset = NULL;
34dc7c2f
BB
1284 }
1285
428870ff
BB
1286 ddt_unload(spa);
1287
fb5f0bc8
BB
1288
1289 /*
1290 * Drop and purge level 2 cache
1291 */
1292 spa_l2cache_drop(spa);
1293
34dc7c2f
BB
1294 for (i = 0; i < spa->spa_spares.sav_count; i++)
1295 vdev_free(spa->spa_spares.sav_vdevs[i]);
1296 if (spa->spa_spares.sav_vdevs) {
1297 kmem_free(spa->spa_spares.sav_vdevs,
1298 spa->spa_spares.sav_count * sizeof (void *));
1299 spa->spa_spares.sav_vdevs = NULL;
1300 }
1301 if (spa->spa_spares.sav_config) {
1302 nvlist_free(spa->spa_spares.sav_config);
1303 spa->spa_spares.sav_config = NULL;
1304 }
b128c09f 1305 spa->spa_spares.sav_count = 0;
34dc7c2f 1306
5ffb9d1d
GW
1307 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1308 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
34dc7c2f 1309 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
5ffb9d1d 1310 }
34dc7c2f
BB
1311 if (spa->spa_l2cache.sav_vdevs) {
1312 kmem_free(spa->spa_l2cache.sav_vdevs,
1313 spa->spa_l2cache.sav_count * sizeof (void *));
1314 spa->spa_l2cache.sav_vdevs = NULL;
1315 }
1316 if (spa->spa_l2cache.sav_config) {
1317 nvlist_free(spa->spa_l2cache.sav_config);
1318 spa->spa_l2cache.sav_config = NULL;
1319 }
b128c09f 1320 spa->spa_l2cache.sav_count = 0;
34dc7c2f
BB
1321
1322 spa->spa_async_suspended = 0;
fb5f0bc8 1323
d96eb2b1
DM
1324 if (spa->spa_comment != NULL) {
1325 spa_strfree(spa->spa_comment);
1326 spa->spa_comment = NULL;
1327 }
1328
fb5f0bc8 1329 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
1330}
1331
1332/*
1333 * Load (or re-load) the current list of vdevs describing the active spares for
1334 * this pool. When this is called, we have some form of basic information in
1335 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1336 * then re-generate a more complete list including status information.
1337 */
1338static void
1339spa_load_spares(spa_t *spa)
1340{
1341 nvlist_t **spares;
1342 uint_t nspares;
1343 int i;
1344 vdev_t *vd, *tvd;
1345
b128c09f
BB
1346 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1347
34dc7c2f
BB
1348 /*
1349 * First, close and free any existing spare vdevs.
1350 */
1351 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1352 vd = spa->spa_spares.sav_vdevs[i];
1353
1354 /* Undo the call to spa_activate() below */
b128c09f
BB
1355 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1356 B_FALSE)) != NULL && tvd->vdev_isspare)
34dc7c2f
BB
1357 spa_spare_remove(tvd);
1358 vdev_close(vd);
1359 vdev_free(vd);
1360 }
1361
1362 if (spa->spa_spares.sav_vdevs)
1363 kmem_free(spa->spa_spares.sav_vdevs,
1364 spa->spa_spares.sav_count * sizeof (void *));
1365
1366 if (spa->spa_spares.sav_config == NULL)
1367 nspares = 0;
1368 else
1369 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1370 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1371
1372 spa->spa_spares.sav_count = (int)nspares;
1373 spa->spa_spares.sav_vdevs = NULL;
1374
1375 if (nspares == 0)
1376 return;
1377
1378 /*
1379 * Construct the array of vdevs, opening them to get status in the
1380 * process. For each spare, there is potentially two different vdev_t
1381 * structures associated with it: one in the list of spares (used only
1382 * for basic validation purposes) and one in the active vdev
1383 * configuration (if it's spared in). During this phase we open and
1384 * validate each vdev on the spare list. If the vdev also exists in the
1385 * active configuration, then we also mark this vdev as an active spare.
1386 */
904ea276 1387 spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
b8d06fca 1388 KM_PUSHPAGE);
34dc7c2f
BB
1389 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1390 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1391 VDEV_ALLOC_SPARE) == 0);
1392 ASSERT(vd != NULL);
1393
1394 spa->spa_spares.sav_vdevs[i] = vd;
1395
b128c09f
BB
1396 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1397 B_FALSE)) != NULL) {
34dc7c2f
BB
1398 if (!tvd->vdev_isspare)
1399 spa_spare_add(tvd);
1400
1401 /*
1402 * We only mark the spare active if we were successfully
1403 * able to load the vdev. Otherwise, importing a pool
1404 * with a bad active spare would result in strange
1405 * behavior, because multiple pool would think the spare
1406 * is actively in use.
1407 *
1408 * There is a vulnerability here to an equally bizarre
1409 * circumstance, where a dead active spare is later
1410 * brought back to life (onlined or otherwise). Given
1411 * the rarity of this scenario, and the extra complexity
1412 * it adds, we ignore the possibility.
1413 */
1414 if (!vdev_is_dead(tvd))
1415 spa_spare_activate(tvd);
1416 }
1417
b128c09f 1418 vd->vdev_top = vd;
9babb374 1419 vd->vdev_aux = &spa->spa_spares;
b128c09f 1420
34dc7c2f
BB
1421 if (vdev_open(vd) != 0)
1422 continue;
1423
34dc7c2f
BB
1424 if (vdev_validate_aux(vd) == 0)
1425 spa_spare_add(vd);
1426 }
1427
1428 /*
1429 * Recompute the stashed list of spares, with status information
1430 * this time.
1431 */
1432 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1433 DATA_TYPE_NVLIST_ARRAY) == 0);
1434
1435 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
b8d06fca 1436 KM_PUSHPAGE);
34dc7c2f
BB
1437 for (i = 0; i < spa->spa_spares.sav_count; i++)
1438 spares[i] = vdev_config_generate(spa,
428870ff 1439 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
34dc7c2f
BB
1440 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1441 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1442 for (i = 0; i < spa->spa_spares.sav_count; i++)
1443 nvlist_free(spares[i]);
1444 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1445}
1446
1447/*
1448 * Load (or re-load) the current list of vdevs describing the active l2cache for
1449 * this pool. When this is called, we have some form of basic information in
1450 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1451 * then re-generate a more complete list including status information.
1452 * Devices which are already active have their details maintained, and are
1453 * not re-opened.
1454 */
1455static void
1456spa_load_l2cache(spa_t *spa)
1457{
1458 nvlist_t **l2cache;
1459 uint_t nl2cache;
1460 int i, j, oldnvdevs;
9babb374 1461 uint64_t guid;
a117a6d6 1462 vdev_t *vd, **oldvdevs, **newvdevs;
34dc7c2f
BB
1463 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1464
b128c09f
BB
1465 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1466
34dc7c2f
BB
1467 if (sav->sav_config != NULL) {
1468 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1469 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
b8d06fca 1470 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_PUSHPAGE);
34dc7c2f
BB
1471 } else {
1472 nl2cache = 0;
a117a6d6 1473 newvdevs = NULL;
34dc7c2f
BB
1474 }
1475
1476 oldvdevs = sav->sav_vdevs;
1477 oldnvdevs = sav->sav_count;
1478 sav->sav_vdevs = NULL;
1479 sav->sav_count = 0;
1480
1481 /*
1482 * Process new nvlist of vdevs.
1483 */
1484 for (i = 0; i < nl2cache; i++) {
1485 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1486 &guid) == 0);
1487
1488 newvdevs[i] = NULL;
1489 for (j = 0; j < oldnvdevs; j++) {
1490 vd = oldvdevs[j];
1491 if (vd != NULL && guid == vd->vdev_guid) {
1492 /*
1493 * Retain previous vdev for add/remove ops.
1494 */
1495 newvdevs[i] = vd;
1496 oldvdevs[j] = NULL;
1497 break;
1498 }
1499 }
1500
1501 if (newvdevs[i] == NULL) {
1502 /*
1503 * Create new vdev
1504 */
1505 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1506 VDEV_ALLOC_L2CACHE) == 0);
1507 ASSERT(vd != NULL);
1508 newvdevs[i] = vd;
1509
1510 /*
1511 * Commit this vdev as an l2cache device,
1512 * even if it fails to open.
1513 */
1514 spa_l2cache_add(vd);
1515
b128c09f
BB
1516 vd->vdev_top = vd;
1517 vd->vdev_aux = sav;
1518
1519 spa_l2cache_activate(vd);
1520
34dc7c2f
BB
1521 if (vdev_open(vd) != 0)
1522 continue;
1523
34dc7c2f
BB
1524 (void) vdev_validate_aux(vd);
1525
9babb374
BB
1526 if (!vdev_is_dead(vd))
1527 l2arc_add_vdev(spa, vd);
34dc7c2f
BB
1528 }
1529 }
1530
1531 /*
1532 * Purge vdevs that were dropped
1533 */
1534 for (i = 0; i < oldnvdevs; i++) {
1535 uint64_t pool;
1536
1537 vd = oldvdevs[i];
1538 if (vd != NULL) {
5ffb9d1d
GW
1539 ASSERT(vd->vdev_isl2cache);
1540
fb5f0bc8
BB
1541 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1542 pool != 0ULL && l2arc_vdev_present(vd))
34dc7c2f 1543 l2arc_remove_vdev(vd);
5ffb9d1d
GW
1544 vdev_clear_stats(vd);
1545 vdev_free(vd);
34dc7c2f
BB
1546 }
1547 }
1548
1549 if (oldvdevs)
1550 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1551
1552 if (sav->sav_config == NULL)
1553 goto out;
1554
1555 sav->sav_vdevs = newvdevs;
1556 sav->sav_count = (int)nl2cache;
1557
1558 /*
1559 * Recompute the stashed list of l2cache devices, with status
1560 * information this time.
1561 */
1562 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1563 DATA_TYPE_NVLIST_ARRAY) == 0);
1564
b8d06fca 1565 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
34dc7c2f
BB
1566 for (i = 0; i < sav->sav_count; i++)
1567 l2cache[i] = vdev_config_generate(spa,
428870ff 1568 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
34dc7c2f
BB
1569 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1570 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1571out:
1572 for (i = 0; i < sav->sav_count; i++)
1573 nvlist_free(l2cache[i]);
1574 if (sav->sav_count)
1575 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1576}
1577
1578static int
1579load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1580{
1581 dmu_buf_t *db;
1582 char *packed = NULL;
1583 size_t nvsize = 0;
1584 int error;
1585 *value = NULL;
1586
c3275b56
BB
1587 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1588 if (error)
1589 return (error);
1590
34dc7c2f
BB
1591 nvsize = *(uint64_t *)db->db_data;
1592 dmu_buf_rele(db, FTAG);
1593
b8d06fca 1594 packed = kmem_alloc(nvsize, KM_PUSHPAGE | KM_NODEBUG);
9babb374
BB
1595 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1596 DMU_READ_PREFETCH);
34dc7c2f
BB
1597 if (error == 0)
1598 error = nvlist_unpack(packed, nvsize, value, 0);
1599 kmem_free(packed, nvsize);
1600
1601 return (error);
1602}
1603
1604/*
1605 * Checks to see if the given vdev could not be opened, in which case we post a
1606 * sysevent to notify the autoreplace code that the device has been removed.
1607 */
1608static void
1609spa_check_removed(vdev_t *vd)
1610{
d6320ddb
BB
1611 int c;
1612
1613 for (c = 0; c < vd->vdev_children; c++)
34dc7c2f
BB
1614 spa_check_removed(vd->vdev_child[c]);
1615
7011fb60
YP
1616 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1617 !vd->vdev_ishole) {
26685276
BB
1618 zfs_ereport_post(FM_EREPORT_RESOURCE_AUTOREPLACE,
1619 vd->vdev_spa, vd, NULL, 0, 0);
1620 spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_CHECK);
34dc7c2f
BB
1621 }
1622}
1623
9babb374 1624/*
572e2857 1625 * Validate the current config against the MOS config
9babb374 1626 */
572e2857
BB
1627static boolean_t
1628spa_config_valid(spa_t *spa, nvlist_t *config)
9babb374 1629{
572e2857
BB
1630 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1631 nvlist_t *nv;
d6320ddb 1632 int c, i;
572e2857
BB
1633
1634 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1635
1636 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1637 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1638
1639 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
9babb374 1640
428870ff 1641 /*
572e2857
BB
1642 * If we're doing a normal import, then build up any additional
1643 * diagnostic information about missing devices in this config.
1644 * We'll pass this up to the user for further processing.
428870ff 1645 */
572e2857
BB
1646 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1647 nvlist_t **child, *nv;
1648 uint64_t idx = 0;
1649
1650 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
b8d06fca
RY
1651 KM_PUSHPAGE);
1652 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
572e2857 1653
d6320ddb 1654 for (c = 0; c < rvd->vdev_children; c++) {
572e2857
BB
1655 vdev_t *tvd = rvd->vdev_child[c];
1656 vdev_t *mtvd = mrvd->vdev_child[c];
1657
1658 if (tvd->vdev_ops == &vdev_missing_ops &&
1659 mtvd->vdev_ops != &vdev_missing_ops &&
1660 mtvd->vdev_islog)
1661 child[idx++] = vdev_config_generate(spa, mtvd,
1662 B_FALSE, 0);
1663 }
9babb374 1664
572e2857
BB
1665 if (idx) {
1666 VERIFY(nvlist_add_nvlist_array(nv,
1667 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1668 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1669 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1670
d6320ddb 1671 for (i = 0; i < idx; i++)
572e2857
BB
1672 nvlist_free(child[i]);
1673 }
1674 nvlist_free(nv);
1675 kmem_free(child, rvd->vdev_children * sizeof (char **));
1676 }
1677
1678 /*
1679 * Compare the root vdev tree with the information we have
1680 * from the MOS config (mrvd). Check each top-level vdev
1681 * with the corresponding MOS config top-level (mtvd).
1682 */
d6320ddb 1683 for (c = 0; c < rvd->vdev_children; c++) {
572e2857
BB
1684 vdev_t *tvd = rvd->vdev_child[c];
1685 vdev_t *mtvd = mrvd->vdev_child[c];
1686
1687 /*
1688 * Resolve any "missing" vdevs in the current configuration.
1689 * If we find that the MOS config has more accurate information
1690 * about the top-level vdev then use that vdev instead.
1691 */
1692 if (tvd->vdev_ops == &vdev_missing_ops &&
1693 mtvd->vdev_ops != &vdev_missing_ops) {
1694
1695 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1696 continue;
1697
1698 /*
1699 * Device specific actions.
1700 */
1701 if (mtvd->vdev_islog) {
1702 spa_set_log_state(spa, SPA_LOG_CLEAR);
1703 } else {
1704 /*
1705 * XXX - once we have 'readonly' pool
1706 * support we should be able to handle
1707 * missing data devices by transitioning
1708 * the pool to readonly.
1709 */
1710 continue;
1711 }
1712
1713 /*
1714 * Swap the missing vdev with the data we were
1715 * able to obtain from the MOS config.
1716 */
1717 vdev_remove_child(rvd, tvd);
1718 vdev_remove_child(mrvd, mtvd);
1719
1720 vdev_add_child(rvd, mtvd);
1721 vdev_add_child(mrvd, tvd);
1722
1723 spa_config_exit(spa, SCL_ALL, FTAG);
1724 vdev_load(mtvd);
1725 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1726
1727 vdev_reopen(rvd);
1728 } else if (mtvd->vdev_islog) {
1729 /*
1730 * Load the slog device's state from the MOS config
1731 * since it's possible that the label does not
1732 * contain the most up-to-date information.
1733 */
1734 vdev_load_log_state(tvd, mtvd);
1735 vdev_reopen(tvd);
1736 }
9babb374 1737 }
572e2857 1738 vdev_free(mrvd);
428870ff 1739 spa_config_exit(spa, SCL_ALL, FTAG);
572e2857
BB
1740
1741 /*
1742 * Ensure we were able to validate the config.
1743 */
1744 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
9babb374
BB
1745}
1746
b128c09f
BB
1747/*
1748 * Check for missing log devices
1749 */
13fe0198 1750static boolean_t
b128c09f
BB
1751spa_check_logs(spa_t *spa)
1752{
13fe0198
MA
1753 boolean_t rv = B_FALSE;
1754
b128c09f 1755 switch (spa->spa_log_state) {
e75c13c3
BB
1756 default:
1757 break;
b128c09f
BB
1758 case SPA_LOG_MISSING:
1759 /* need to recheck in case slog has been restored */
1760 case SPA_LOG_UNKNOWN:
13fe0198
MA
1761 rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
1762 NULL, DS_FIND_CHILDREN) != 0);
1763 if (rv)
428870ff 1764 spa_set_log_state(spa, SPA_LOG_MISSING);
b128c09f 1765 break;
b128c09f 1766 }
13fe0198 1767 return (rv);
b128c09f
BB
1768}
1769
428870ff
BB
1770static boolean_t
1771spa_passivate_log(spa_t *spa)
34dc7c2f 1772{
428870ff
BB
1773 vdev_t *rvd = spa->spa_root_vdev;
1774 boolean_t slog_found = B_FALSE;
d6320ddb 1775 int c;
b128c09f 1776
428870ff 1777 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
fb5f0bc8 1778
428870ff
BB
1779 if (!spa_has_slogs(spa))
1780 return (B_FALSE);
34dc7c2f 1781
d6320ddb 1782 for (c = 0; c < rvd->vdev_children; c++) {
428870ff
BB
1783 vdev_t *tvd = rvd->vdev_child[c];
1784 metaslab_group_t *mg = tvd->vdev_mg;
34dc7c2f 1785
428870ff
BB
1786 if (tvd->vdev_islog) {
1787 metaslab_group_passivate(mg);
1788 slog_found = B_TRUE;
1789 }
34dc7c2f
BB
1790 }
1791
428870ff
BB
1792 return (slog_found);
1793}
34dc7c2f 1794
428870ff
BB
1795static void
1796spa_activate_log(spa_t *spa)
1797{
1798 vdev_t *rvd = spa->spa_root_vdev;
d6320ddb 1799 int c;
34dc7c2f 1800
428870ff
BB
1801 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1802
d6320ddb 1803 for (c = 0; c < rvd->vdev_children; c++) {
428870ff
BB
1804 vdev_t *tvd = rvd->vdev_child[c];
1805 metaslab_group_t *mg = tvd->vdev_mg;
1806
1807 if (tvd->vdev_islog)
1808 metaslab_group_activate(mg);
34dc7c2f 1809 }
428870ff 1810}
34dc7c2f 1811
428870ff
BB
1812int
1813spa_offline_log(spa_t *spa)
1814{
13fe0198 1815 int error;
9babb374 1816
13fe0198
MA
1817 error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1818 NULL, DS_FIND_CHILDREN);
1819 if (error == 0) {
428870ff
BB
1820 /*
1821 * We successfully offlined the log device, sync out the
1822 * current txg so that the "stubby" block can be removed
1823 * by zil_sync().
1824 */
1825 txg_wait_synced(spa->spa_dsl_pool, 0);
1826 }
1827 return (error);
1828}
34dc7c2f 1829
428870ff
BB
1830static void
1831spa_aux_check_removed(spa_aux_vdev_t *sav)
1832{
d6320ddb
BB
1833 int i;
1834
1835 for (i = 0; i < sav->sav_count; i++)
428870ff
BB
1836 spa_check_removed(sav->sav_vdevs[i]);
1837}
34dc7c2f 1838
428870ff
BB
1839void
1840spa_claim_notify(zio_t *zio)
1841{
1842 spa_t *spa = zio->io_spa;
34dc7c2f 1843
428870ff
BB
1844 if (zio->io_error)
1845 return;
34dc7c2f 1846
428870ff
BB
1847 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1848 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1849 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1850 mutex_exit(&spa->spa_props_lock);
1851}
34dc7c2f 1852
428870ff
BB
1853typedef struct spa_load_error {
1854 uint64_t sle_meta_count;
1855 uint64_t sle_data_count;
1856} spa_load_error_t;
34dc7c2f 1857
428870ff
BB
1858static void
1859spa_load_verify_done(zio_t *zio)
1860{
1861 blkptr_t *bp = zio->io_bp;
1862 spa_load_error_t *sle = zio->io_private;
1863 dmu_object_type_t type = BP_GET_TYPE(bp);
1864 int error = zio->io_error;
34dc7c2f 1865
428870ff 1866 if (error) {
9ae529ec 1867 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
428870ff
BB
1868 type != DMU_OT_INTENT_LOG)
1869 atomic_add_64(&sle->sle_meta_count, 1);
1870 else
1871 atomic_add_64(&sle->sle_data_count, 1);
34dc7c2f 1872 }
428870ff
BB
1873 zio_data_buf_free(zio->io_data, zio->io_size);
1874}
34dc7c2f 1875
428870ff
BB
1876/*ARGSUSED*/
1877static int
1878spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
5dbd68a3 1879 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
428870ff 1880{
9b67f605 1881 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
428870ff
BB
1882 zio_t *rio = arg;
1883 size_t size = BP_GET_PSIZE(bp);
1884 void *data = zio_data_buf_alloc(size);
34dc7c2f 1885
428870ff
BB
1886 zio_nowait(zio_read(rio, spa, bp, data, size,
1887 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
1888 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
1889 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
34dc7c2f 1890 }
428870ff
BB
1891 return (0);
1892}
34dc7c2f 1893
428870ff
BB
1894static int
1895spa_load_verify(spa_t *spa)
1896{
1897 zio_t *rio;
1898 spa_load_error_t sle = { 0 };
1899 zpool_rewind_policy_t policy;
1900 boolean_t verify_ok = B_FALSE;
1901 int error;
34dc7c2f 1902
428870ff 1903 zpool_get_rewind_policy(spa->spa_config, &policy);
34dc7c2f 1904
428870ff
BB
1905 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
1906 return (0);
34dc7c2f 1907
428870ff
BB
1908 rio = zio_root(spa, NULL, &sle,
1909 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
34dc7c2f 1910
428870ff
BB
1911 error = traverse_pool(spa, spa->spa_verify_min_txg,
1912 TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);
1913
1914 (void) zio_wait(rio);
1915
1916 spa->spa_load_meta_errors = sle.sle_meta_count;
1917 spa->spa_load_data_errors = sle.sle_data_count;
1918
1919 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
1920 sle.sle_data_count <= policy.zrp_maxdata) {
572e2857
BB
1921 int64_t loss = 0;
1922
428870ff
BB
1923 verify_ok = B_TRUE;
1924 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
1925 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
572e2857
BB
1926
1927 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
1928 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1929 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
1930 VERIFY(nvlist_add_int64(spa->spa_load_info,
1931 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
1932 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1933 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
428870ff
BB
1934 } else {
1935 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
1936 }
1937
1938 if (error) {
1939 if (error != ENXIO && error != EIO)
2e528b49 1940 error = SET_ERROR(EIO);
428870ff
BB
1941 return (error);
1942 }
1943
1944 return (verify_ok ? 0 : EIO);
1945}
1946
1947/*
1948 * Find a value in the pool props object.
1949 */
1950static void
1951spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
1952{
1953 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
1954 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
1955}
1956
1957/*
1958 * Find a value in the pool directory object.
1959 */
1960static int
1961spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
1962{
1963 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1964 name, sizeof (uint64_t), 1, val));
1965}
1966
1967static int
1968spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
1969{
1970 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
1971 return (err);
1972}
1973
1974/*
1975 * Fix up config after a partly-completed split. This is done with the
1976 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
1977 * pool have that entry in their config, but only the splitting one contains
1978 * a list of all the guids of the vdevs that are being split off.
1979 *
1980 * This function determines what to do with that list: either rejoin
1981 * all the disks to the pool, or complete the splitting process. To attempt
1982 * the rejoin, each disk that is offlined is marked online again, and
1983 * we do a reopen() call. If the vdev label for every disk that was
1984 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
1985 * then we call vdev_split() on each disk, and complete the split.
1986 *
1987 * Otherwise we leave the config alone, with all the vdevs in place in
1988 * the original pool.
1989 */
1990static void
1991spa_try_repair(spa_t *spa, nvlist_t *config)
1992{
1993 uint_t extracted;
1994 uint64_t *glist;
1995 uint_t i, gcount;
1996 nvlist_t *nvl;
1997 vdev_t **vd;
1998 boolean_t attempt_reopen;
1999
2000 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2001 return;
2002
2003 /* check that the config is complete */
2004 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2005 &glist, &gcount) != 0)
2006 return;
2007
b8d06fca 2008 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_PUSHPAGE);
428870ff
BB
2009
2010 /* attempt to online all the vdevs & validate */
2011 attempt_reopen = B_TRUE;
2012 for (i = 0; i < gcount; i++) {
2013 if (glist[i] == 0) /* vdev is hole */
2014 continue;
2015
2016 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2017 if (vd[i] == NULL) {
2018 /*
2019 * Don't bother attempting to reopen the disks;
2020 * just do the split.
2021 */
2022 attempt_reopen = B_FALSE;
2023 } else {
2024 /* attempt to re-online it */
2025 vd[i]->vdev_offline = B_FALSE;
2026 }
2027 }
2028
2029 if (attempt_reopen) {
2030 vdev_reopen(spa->spa_root_vdev);
2031
2032 /* check each device to see what state it's in */
2033 for (extracted = 0, i = 0; i < gcount; i++) {
2034 if (vd[i] != NULL &&
2035 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2036 break;
2037 ++extracted;
2038 }
2039 }
2040
2041 /*
2042 * If every disk has been moved to the new pool, or if we never
2043 * even attempted to look at them, then we split them off for
2044 * good.
2045 */
2046 if (!attempt_reopen || gcount == extracted) {
2047 for (i = 0; i < gcount; i++)
2048 if (vd[i] != NULL)
2049 vdev_split(vd[i]);
2050 vdev_reopen(spa->spa_root_vdev);
2051 }
2052
2053 kmem_free(vd, gcount * sizeof (vdev_t *));
2054}
2055
2056static int
2057spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2058 boolean_t mosconfig)
2059{
2060 nvlist_t *config = spa->spa_config;
2061 char *ereport = FM_EREPORT_ZFS_POOL;
d96eb2b1 2062 char *comment;
428870ff
BB
2063 int error;
2064 uint64_t pool_guid;
2065 nvlist_t *nvl;
2066
2067 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2e528b49 2068 return (SET_ERROR(EINVAL));
428870ff 2069
d96eb2b1
DM
2070 ASSERT(spa->spa_comment == NULL);
2071 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2072 spa->spa_comment = spa_strdup(comment);
2073
428870ff
BB
2074 /*
2075 * Versioning wasn't explicitly added to the label until later, so if
2076 * it's not present treat it as the initial version.
2077 */
2078 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2079 &spa->spa_ubsync.ub_version) != 0)
2080 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2081
2082 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2083 &spa->spa_config_txg);
2084
2085 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2086 spa_guid_exists(pool_guid, 0)) {
2e528b49 2087 error = SET_ERROR(EEXIST);
428870ff 2088 } else {
3541dc6d 2089 spa->spa_config_guid = pool_guid;
428870ff
BB
2090
2091 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2092 &nvl) == 0) {
2093 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
b8d06fca 2094 KM_PUSHPAGE) == 0);
428870ff
BB
2095 }
2096
9ae529ec
CS
2097 nvlist_free(spa->spa_load_info);
2098 spa->spa_load_info = fnvlist_alloc();
2099
572e2857 2100 gethrestime(&spa->spa_loaded_ts);
428870ff
BB
2101 error = spa_load_impl(spa, pool_guid, config, state, type,
2102 mosconfig, &ereport);
2103 }
2104
2105 spa->spa_minref = refcount_count(&spa->spa_refcount);
572e2857
BB
2106 if (error) {
2107 if (error != EEXIST) {
2108 spa->spa_loaded_ts.tv_sec = 0;
2109 spa->spa_loaded_ts.tv_nsec = 0;
2110 }
2111 if (error != EBADF) {
2112 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2113 }
2114 }
428870ff
BB
2115 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2116 spa->spa_ena = 0;
2117
2118 return (error);
2119}
2120
2121/*
2122 * Load an existing storage pool, using the pool's builtin spa_config as a
2123 * source of configuration information.
2124 */
bf701a83
BB
2125__attribute__((always_inline))
2126static inline int
428870ff
BB
2127spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2128 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2129 char **ereport)
2130{
2131 int error = 0;
2132 nvlist_t *nvroot = NULL;
9ae529ec 2133 nvlist_t *label;
428870ff
BB
2134 vdev_t *rvd;
2135 uberblock_t *ub = &spa->spa_uberblock;
572e2857 2136 uint64_t children, config_cache_txg = spa->spa_config_txg;
428870ff
BB
2137 int orig_mode = spa->spa_mode;
2138 int parse;
2139 uint64_t obj;
9ae529ec 2140 boolean_t missing_feat_write = B_FALSE;
428870ff
BB
2141
2142 /*
2143 * If this is an untrusted config, access the pool in read-only mode.
2144 * This prevents things like resilvering recently removed devices.
2145 */
2146 if (!mosconfig)
2147 spa->spa_mode = FREAD;
2148
2149 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2150
2151 spa->spa_load_state = state;
2152
2153 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2e528b49 2154 return (SET_ERROR(EINVAL));
428870ff
BB
2155
2156 parse = (type == SPA_IMPORT_EXISTING ?
2157 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2158
2159 /*
2160 * Create "The Godfather" zio to hold all async IOs
2161 */
2162 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2163 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2164
2165 /*
2166 * Parse the configuration into a vdev tree. We explicitly set the
2167 * value that will be returned by spa_version() since parsing the
2168 * configuration requires knowing the version number.
2169 */
2170 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2171 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2172 spa_config_exit(spa, SCL_ALL, FTAG);
2173
2174 if (error != 0)
2175 return (error);
2176
2177 ASSERT(spa->spa_root_vdev == rvd);
2178
2179 if (type != SPA_IMPORT_ASSEMBLE) {
2180 ASSERT(spa_guid(spa) == pool_guid);
2181 }
2182
2183 /*
2184 * Try to open all vdevs, loading each label in the process.
2185 */
2186 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2187 error = vdev_open(rvd);
2188 spa_config_exit(spa, SCL_ALL, FTAG);
2189 if (error != 0)
2190 return (error);
2191
2192 /*
2193 * We need to validate the vdev labels against the configuration that
2194 * we have in hand, which is dependent on the setting of mosconfig. If
2195 * mosconfig is true then we're validating the vdev labels based on
2196 * that config. Otherwise, we're validating against the cached config
2197 * (zpool.cache) that was read when we loaded the zfs module, and then
2198 * later we will recursively call spa_load() and validate against
2199 * the vdev config.
2200 *
2201 * If we're assembling a new pool that's been split off from an
2202 * existing pool, the labels haven't yet been updated so we skip
2203 * validation for now.
2204 */
2205 if (type != SPA_IMPORT_ASSEMBLE) {
2206 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
c7f2d69d 2207 error = vdev_validate(rvd, mosconfig);
428870ff
BB
2208 spa_config_exit(spa, SCL_ALL, FTAG);
2209
2210 if (error != 0)
2211 return (error);
2212
2213 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2e528b49 2214 return (SET_ERROR(ENXIO));
428870ff
BB
2215 }
2216
2217 /*
2218 * Find the best uberblock.
2219 */
9ae529ec 2220 vdev_uberblock_load(rvd, ub, &label);
428870ff
BB
2221
2222 /*
2223 * If we weren't able to find a single valid uberblock, return failure.
2224 */
9ae529ec
CS
2225 if (ub->ub_txg == 0) {
2226 nvlist_free(label);
428870ff 2227 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
9ae529ec 2228 }
428870ff
BB
2229
2230 /*
9ae529ec 2231 * If the pool has an unsupported version we can't open it.
428870ff 2232 */
9ae529ec
CS
2233 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2234 nvlist_free(label);
428870ff 2235 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
9ae529ec
CS
2236 }
2237
2238 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2239 nvlist_t *features;
2240
2241 /*
2242 * If we weren't able to find what's necessary for reading the
2243 * MOS in the label, return failure.
2244 */
2245 if (label == NULL || nvlist_lookup_nvlist(label,
2246 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2247 nvlist_free(label);
2248 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2249 ENXIO));
2250 }
2251
2252 /*
2253 * Update our in-core representation with the definitive values
2254 * from the label.
2255 */
2256 nvlist_free(spa->spa_label_features);
2257 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2258 }
2259
2260 nvlist_free(label);
2261
2262 /*
2263 * Look through entries in the label nvlist's features_for_read. If
2264 * there is a feature listed there which we don't understand then we
2265 * cannot open a pool.
2266 */
2267 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2268 nvlist_t *unsup_feat;
2269 nvpair_t *nvp;
2270
2271 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2272 0);
2273
2274 for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
2275 nvp != NULL;
2276 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2277 if (!zfeature_is_supported(nvpair_name(nvp))) {
2278 VERIFY(nvlist_add_string(unsup_feat,
2279 nvpair_name(nvp), "") == 0);
2280 }
2281 }
2282
2283 if (!nvlist_empty(unsup_feat)) {
2284 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2285 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2286 nvlist_free(unsup_feat);
2287 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2288 ENOTSUP));
2289 }
2290
2291 nvlist_free(unsup_feat);
2292 }
428870ff
BB
2293
2294 /*
2295 * If the vdev guid sum doesn't match the uberblock, we have an
572e2857
BB
2296 * incomplete configuration. We first check to see if the pool
2297 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN).
2298 * If it is, defer the vdev_guid_sum check till later so we
2299 * can handle missing vdevs.
428870ff 2300 */
572e2857
BB
2301 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2302 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
428870ff
BB
2303 rvd->vdev_guid_sum != ub->ub_guid_sum)
2304 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2305
2306 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2307 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2308 spa_try_repair(spa, config);
2309 spa_config_exit(spa, SCL_ALL, FTAG);
2310 nvlist_free(spa->spa_config_splitting);
2311 spa->spa_config_splitting = NULL;
2312 }
2313
2314 /*
2315 * Initialize internal SPA structures.
2316 */
2317 spa->spa_state = POOL_STATE_ACTIVE;
2318 spa->spa_ubsync = spa->spa_uberblock;
2319 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2320 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2321 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2322 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2323 spa->spa_claim_max_txg = spa->spa_first_txg;
2324 spa->spa_prev_software_version = ub->ub_software_version;
2325
9ae529ec 2326 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
428870ff
BB
2327 if (error)
2328 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2329 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2330
2331 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2332 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2333
9ae529ec
CS
2334 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2335 boolean_t missing_feat_read = B_FALSE;
b9b24bb4 2336 nvlist_t *unsup_feat, *enabled_feat;
b0bc7a84 2337 spa_feature_t i;
9ae529ec
CS
2338
2339 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2340 &spa->spa_feat_for_read_obj) != 0) {
2341 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2342 }
2343
2344 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2345 &spa->spa_feat_for_write_obj) != 0) {
2346 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2347 }
2348
2349 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2350 &spa->spa_feat_desc_obj) != 0) {
2351 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2352 }
2353
b9b24bb4
CS
2354 enabled_feat = fnvlist_alloc();
2355 unsup_feat = fnvlist_alloc();
9ae529ec 2356
fa86b5db 2357 if (!spa_features_check(spa, B_FALSE,
b9b24bb4 2358 unsup_feat, enabled_feat))
9ae529ec
CS
2359 missing_feat_read = B_TRUE;
2360
2361 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
fa86b5db 2362 if (!spa_features_check(spa, B_TRUE,
b9b24bb4 2363 unsup_feat, enabled_feat)) {
9ae529ec 2364 missing_feat_write = B_TRUE;
b9b24bb4 2365 }
9ae529ec
CS
2366 }
2367
b9b24bb4
CS
2368 fnvlist_add_nvlist(spa->spa_load_info,
2369 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2370
9ae529ec 2371 if (!nvlist_empty(unsup_feat)) {
b9b24bb4
CS
2372 fnvlist_add_nvlist(spa->spa_load_info,
2373 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
9ae529ec
CS
2374 }
2375
b9b24bb4
CS
2376 fnvlist_free(enabled_feat);
2377 fnvlist_free(unsup_feat);
9ae529ec
CS
2378
2379 if (!missing_feat_read) {
2380 fnvlist_add_boolean(spa->spa_load_info,
2381 ZPOOL_CONFIG_CAN_RDONLY);
2382 }
2383
2384 /*
2385 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2386 * twofold: to determine whether the pool is available for
2387 * import in read-write mode and (if it is not) whether the
2388 * pool is available for import in read-only mode. If the pool
2389 * is available for import in read-write mode, it is displayed
2390 * as available in userland; if it is not available for import
2391 * in read-only mode, it is displayed as unavailable in
2392 * userland. If the pool is available for import in read-only
2393 * mode but not read-write mode, it is displayed as unavailable
2394 * in userland with a special note that the pool is actually
2395 * available for open in read-only mode.
2396 *
2397 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2398 * missing a feature for write, we must first determine whether
2399 * the pool can be opened read-only before returning to
2400 * userland in order to know whether to display the
2401 * abovementioned note.
2402 */
2403 if (missing_feat_read || (missing_feat_write &&
2404 spa_writeable(spa))) {
2405 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2406 ENOTSUP));
2407 }
b0bc7a84
MG
2408
2409 /*
2410 * Load refcounts for ZFS features from disk into an in-memory
2411 * cache during SPA initialization.
2412 */
2413 for (i = 0; i < SPA_FEATURES; i++) {
2414 uint64_t refcount;
2415
2416 error = feature_get_refcount_from_disk(spa,
2417 &spa_feature_table[i], &refcount);
2418 if (error == 0) {
2419 spa->spa_feat_refcount_cache[i] = refcount;
2420 } else if (error == ENOTSUP) {
2421 spa->spa_feat_refcount_cache[i] =
2422 SPA_FEATURE_DISABLED;
2423 } else {
2424 return (spa_vdev_err(rvd,
2425 VDEV_AUX_CORRUPT_DATA, EIO));
2426 }
2427 }
2428 }
2429
2430 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2431 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
9b67f605 2432 &spa->spa_feat_enabled_txg_obj) != 0)
b0bc7a84 2433 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
9ae529ec
CS
2434 }
2435
2436 spa->spa_is_initializing = B_TRUE;
2437 error = dsl_pool_open(spa->spa_dsl_pool);
2438 spa->spa_is_initializing = B_FALSE;
2439 if (error != 0)
2440 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2441
428870ff
BB
2442 if (!mosconfig) {
2443 uint64_t hostid;
2444 nvlist_t *policy = NULL, *nvconfig;
2445
2446 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2447 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2448
2449 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
b128c09f 2450 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
34dc7c2f
BB
2451 char *hostname;
2452 unsigned long myhostid = 0;
2453
428870ff 2454 VERIFY(nvlist_lookup_string(nvconfig,
34dc7c2f
BB
2455 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2456
d164b209
BB
2457#ifdef _KERNEL
2458 myhostid = zone_get_hostid(NULL);
2459#else /* _KERNEL */
2460 /*
2461 * We're emulating the system's hostid in userland, so
2462 * we can't use zone_get_hostid().
2463 */
34dc7c2f 2464 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
d164b209 2465#endif /* _KERNEL */
34dc7c2f 2466 if (hostid != 0 && myhostid != 0 &&
d164b209 2467 hostid != myhostid) {
428870ff 2468 nvlist_free(nvconfig);
34dc7c2f 2469 cmn_err(CE_WARN, "pool '%s' could not be "
d1d7e268
MK
2470 "loaded as it was last accessed by another "
2471 "system (host: %s hostid: 0x%lx). See: "
2472 "http://zfsonlinux.org/msg/ZFS-8000-EY",
b128c09f 2473 spa_name(spa), hostname,
34dc7c2f 2474 (unsigned long)hostid);
2e528b49 2475 return (SET_ERROR(EBADF));
34dc7c2f
BB
2476 }
2477 }
428870ff
BB
2478 if (nvlist_lookup_nvlist(spa->spa_config,
2479 ZPOOL_REWIND_POLICY, &policy) == 0)
2480 VERIFY(nvlist_add_nvlist(nvconfig,
2481 ZPOOL_REWIND_POLICY, policy) == 0);
34dc7c2f 2482
428870ff 2483 spa_config_set(spa, nvconfig);
34dc7c2f
BB
2484 spa_unload(spa);
2485 spa_deactivate(spa);
fb5f0bc8 2486 spa_activate(spa, orig_mode);
34dc7c2f 2487
428870ff 2488 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
34dc7c2f
BB
2489 }
2490
428870ff
BB
2491 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2492 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2493 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2494 if (error != 0)
2495 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f
BB
2496
2497 /*
2498 * Load the bit that tells us to use the new accounting function
2499 * (raid-z deflation). If we have an older pool, this will not
2500 * be present.
2501 */
428870ff
BB
2502 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2503 if (error != 0 && error != ENOENT)
2504 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2505
2506 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2507 &spa->spa_creation_version);
2508 if (error != 0 && error != ENOENT)
2509 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f
BB
2510
2511 /*
2512 * Load the persistent error log. If we have an older pool, this will
2513 * not be present.
2514 */
428870ff
BB
2515 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2516 if (error != 0 && error != ENOENT)
2517 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2518
428870ff
BB
2519 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2520 &spa->spa_errlog_scrub);
2521 if (error != 0 && error != ENOENT)
2522 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f
BB
2523
2524 /*
2525 * Load the history object. If we have an older pool, this
2526 * will not be present.
2527 */
428870ff
BB
2528 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2529 if (error != 0 && error != ENOENT)
2530 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2531
2532 /*
2533 * If we're assembling the pool from the split-off vdevs of
2534 * an existing pool, we don't want to attach the spares & cache
2535 * devices.
2536 */
34dc7c2f
BB
2537
2538 /*
2539 * Load any hot spares for this pool.
2540 */
428870ff
BB
2541 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2542 if (error != 0 && error != ENOENT)
2543 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2544 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
34dc7c2f
BB
2545 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2546 if (load_nvlist(spa, spa->spa_spares.sav_object,
428870ff
BB
2547 &spa->spa_spares.sav_config) != 0)
2548 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2549
b128c09f 2550 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 2551 spa_load_spares(spa);
b128c09f 2552 spa_config_exit(spa, SCL_ALL, FTAG);
428870ff
BB
2553 } else if (error == 0) {
2554 spa->spa_spares.sav_sync = B_TRUE;
34dc7c2f
BB
2555 }
2556
2557 /*
2558 * Load any level 2 ARC devices for this pool.
2559 */
428870ff 2560 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
34dc7c2f 2561 &spa->spa_l2cache.sav_object);
428870ff
BB
2562 if (error != 0 && error != ENOENT)
2563 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2564 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
34dc7c2f
BB
2565 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2566 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
428870ff
BB
2567 &spa->spa_l2cache.sav_config) != 0)
2568 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2569
b128c09f 2570 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 2571 spa_load_l2cache(spa);
b128c09f 2572 spa_config_exit(spa, SCL_ALL, FTAG);
428870ff
BB
2573 } else if (error == 0) {
2574 spa->spa_l2cache.sav_sync = B_TRUE;
b128c09f
BB
2575 }
2576
34dc7c2f
BB
2577 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2578
428870ff
BB
2579 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2580 if (error && error != ENOENT)
2581 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f
BB
2582
2583 if (error == 0) {
2dbedf54 2584 uint64_t autoreplace = 0;
428870ff
BB
2585
2586 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2587 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2588 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2589 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2590 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2591 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2592 &spa->spa_dedup_ditto);
2593
2594 spa->spa_autoreplace = (autoreplace != 0);
34dc7c2f
BB
2595 }
2596
2597 /*
2598 * If the 'autoreplace' property is set, then post a resource notifying
2599 * the ZFS DE that it should not issue any faults for unopenable
2600 * devices. We also iterate over the vdevs, and post a sysevent for any
2601 * unopenable vdevs so that the normal autoreplace handler can take
2602 * over.
2603 */
428870ff 2604 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
34dc7c2f 2605 spa_check_removed(spa->spa_root_vdev);
428870ff
BB
2606 /*
2607 * For the import case, this is done in spa_import(), because
2608 * at this point we're using the spare definitions from
2609 * the MOS config, not necessarily from the userland config.
2610 */
2611 if (state != SPA_LOAD_IMPORT) {
2612 spa_aux_check_removed(&spa->spa_spares);
2613 spa_aux_check_removed(&spa->spa_l2cache);
2614 }
2615 }
34dc7c2f
BB
2616
2617 /*
2618 * Load the vdev state for all toplevel vdevs.
2619 */
2620 vdev_load(rvd);
2621
2622 /*
2623 * Propagate the leaf DTLs we just loaded all the way up the tree.
2624 */
b128c09f 2625 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 2626 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
b128c09f 2627 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 2628
428870ff
BB
2629 /*
2630 * Load the DDTs (dedup tables).
2631 */
2632 error = ddt_load(spa);
2633 if (error != 0)
2634 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2635
2636 spa_update_dspace(spa);
2637
428870ff 2638 /*
572e2857
BB
2639 * Validate the config, using the MOS config to fill in any
2640 * information which might be missing. If we fail to validate
2641 * the config then declare the pool unfit for use. If we're
2642 * assembling a pool from a split, the log is not transferred
2643 * over.
428870ff
BB
2644 */
2645 if (type != SPA_IMPORT_ASSEMBLE) {
2646 nvlist_t *nvconfig;
2647
2648 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2649 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2650
572e2857
BB
2651 if (!spa_config_valid(spa, nvconfig)) {
2652 nvlist_free(nvconfig);
2653 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2654 ENXIO));
2655 }
428870ff
BB
2656 nvlist_free(nvconfig);
2657
572e2857 2658 /*
9ae529ec 2659 * Now that we've validated the config, check the state of the
572e2857
BB
2660 * root vdev. If it can't be opened, it indicates one or
2661 * more toplevel vdevs are faulted.
2662 */
2663 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2e528b49 2664 return (SET_ERROR(ENXIO));
572e2857 2665
428870ff
BB
2666 if (spa_check_logs(spa)) {
2667 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2668 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2669 }
2670 }
2671
9ae529ec
CS
2672 if (missing_feat_write) {
2673 ASSERT(state == SPA_LOAD_TRYIMPORT);
2674
2675 /*
2676 * At this point, we know that we can open the pool in
2677 * read-only mode but not read-write mode. We now have enough
2678 * information and can return to userland.
2679 */
2680 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2681 }
2682
572e2857
BB
2683 /*
2684 * We've successfully opened the pool, verify that we're ready
2685 * to start pushing transactions.
2686 */
2687 if (state != SPA_LOAD_TRYIMPORT) {
c65aa5b2 2688 if ((error = spa_load_verify(spa)))
572e2857
BB
2689 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2690 error));
2691 }
2692
428870ff
BB
2693 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2694 spa->spa_load_max_txg == UINT64_MAX)) {
34dc7c2f
BB
2695 dmu_tx_t *tx;
2696 int need_update = B_FALSE;
d6320ddb 2697 int c;
fb5f0bc8
BB
2698
2699 ASSERT(state != SPA_LOAD_TRYIMPORT);
34dc7c2f
BB
2700
2701 /*
2702 * Claim log blocks that haven't been committed yet.
2703 * This must all happen in a single txg.
428870ff
BB
2704 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2705 * invoked from zil_claim_log_block()'s i/o done callback.
2706 * Price of rollback is that we abandon the log.
34dc7c2f 2707 */
428870ff
BB
2708 spa->spa_claiming = B_TRUE;
2709
34dc7c2f
BB
2710 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2711 spa_first_txg(spa));
b128c09f 2712 (void) dmu_objset_find(spa_name(spa),
34dc7c2f
BB
2713 zil_claim, tx, DS_FIND_CHILDREN);
2714 dmu_tx_commit(tx);
2715
428870ff
BB
2716 spa->spa_claiming = B_FALSE;
2717
2718 spa_set_log_state(spa, SPA_LOG_GOOD);
34dc7c2f
BB
2719 spa->spa_sync_on = B_TRUE;
2720 txg_sync_start(spa->spa_dsl_pool);
2721
2722 /*
428870ff
BB
2723 * Wait for all claims to sync. We sync up to the highest
2724 * claimed log block birth time so that claimed log blocks
2725 * don't appear to be from the future. spa_claim_max_txg
2726 * will have been set for us by either zil_check_log_chain()
2727 * (invoked from spa_check_logs()) or zil_claim() above.
34dc7c2f 2728 */
428870ff 2729 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
34dc7c2f
BB
2730
2731 /*
2732 * If the config cache is stale, or we have uninitialized
2733 * metaslabs (see spa_vdev_add()), then update the config.
45d1cae3 2734 *
572e2857 2735 * If this is a verbatim import, trust the current
45d1cae3 2736 * in-core spa_config and update the disk labels.
34dc7c2f
BB
2737 */
2738 if (config_cache_txg != spa->spa_config_txg ||
572e2857
BB
2739 state == SPA_LOAD_IMPORT ||
2740 state == SPA_LOAD_RECOVER ||
2741 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
34dc7c2f
BB
2742 need_update = B_TRUE;
2743
d6320ddb 2744 for (c = 0; c < rvd->vdev_children; c++)
34dc7c2f
BB
2745 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2746 need_update = B_TRUE;
2747
2748 /*
2749 * Update the config cache asychronously in case we're the
2750 * root pool, in which case the config cache isn't writable yet.
2751 */
2752 if (need_update)
2753 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
fb5f0bc8
BB
2754
2755 /*
2756 * Check all DTLs to see if anything needs resilvering.
2757 */
428870ff
BB
2758 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2759 vdev_resilver_needed(rvd, NULL, NULL))
fb5f0bc8 2760 spa_async_request(spa, SPA_ASYNC_RESILVER);
428870ff 2761
6f1ffb06
MA
2762 /*
2763 * Log the fact that we booted up (so that we can detect if
2764 * we rebooted in the middle of an operation).
2765 */
2766 spa_history_log_version(spa, "open");
2767
428870ff
BB
2768 /*
2769 * Delete any inconsistent datasets.
2770 */
2771 (void) dmu_objset_find(spa_name(spa),
2772 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2773
2774 /*
2775 * Clean up any stale temporary dataset userrefs.
2776 */
2777 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
34dc7c2f
BB
2778 }
2779
428870ff
BB
2780 return (0);
2781}
34dc7c2f 2782
428870ff
BB
2783static int
2784spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
2785{
572e2857
BB
2786 int mode = spa->spa_mode;
2787
428870ff
BB
2788 spa_unload(spa);
2789 spa_deactivate(spa);
2790
2791 spa->spa_load_max_txg--;
2792
572e2857 2793 spa_activate(spa, mode);
428870ff
BB
2794 spa_async_suspend(spa);
2795
2796 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2797}
2798
9ae529ec
CS
2799/*
2800 * If spa_load() fails this function will try loading prior txg's. If
2801 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
2802 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
2803 * function will not rewind the pool and will return the same error as
2804 * spa_load().
2805 */
428870ff
BB
2806static int
2807spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2808 uint64_t max_request, int rewind_flags)
2809{
9ae529ec 2810 nvlist_t *loadinfo = NULL;
428870ff
BB
2811 nvlist_t *config = NULL;
2812 int load_error, rewind_error;
2813 uint64_t safe_rewind_txg;
2814 uint64_t min_txg;
2815
2816 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2817 spa->spa_load_max_txg = spa->spa_load_txg;
2818 spa_set_log_state(spa, SPA_LOG_CLEAR);
2819 } else {
2820 spa->spa_load_max_txg = max_request;
2821 }
2822
2823 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
2824 mosconfig);
2825 if (load_error == 0)
2826 return (0);
2827
2828 if (spa->spa_root_vdev != NULL)
2829 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2830
2831 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2832 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2833
2834 if (rewind_flags & ZPOOL_NEVER_REWIND) {
2835 nvlist_free(config);
2836 return (load_error);
2837 }
2838
9ae529ec
CS
2839 if (state == SPA_LOAD_RECOVER) {
2840 /* Price of rolling back is discarding txgs, including log */
428870ff 2841 spa_set_log_state(spa, SPA_LOG_CLEAR);
9ae529ec
CS
2842 } else {
2843 /*
2844 * If we aren't rolling back save the load info from our first
2845 * import attempt so that we can restore it after attempting
2846 * to rewind.
2847 */
2848 loadinfo = spa->spa_load_info;
2849 spa->spa_load_info = fnvlist_alloc();
2850 }
428870ff
BB
2851
2852 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2853 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2854 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2855 TXG_INITIAL : safe_rewind_txg;
2856
2857 /*
2858 * Continue as long as we're finding errors, we're still within
2859 * the acceptable rewind range, and we're still finding uberblocks
2860 */
2861 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
2862 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
2863 if (spa->spa_load_max_txg < safe_rewind_txg)
2864 spa->spa_extreme_rewind = B_TRUE;
2865 rewind_error = spa_load_retry(spa, state, mosconfig);
2866 }
2867
428870ff
BB
2868 spa->spa_extreme_rewind = B_FALSE;
2869 spa->spa_load_max_txg = UINT64_MAX;
2870
2871 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2872 spa_config_set(spa, config);
2873
9ae529ec
CS
2874 if (state == SPA_LOAD_RECOVER) {
2875 ASSERT3P(loadinfo, ==, NULL);
2876 return (rewind_error);
2877 } else {
2878 /* Store the rewind info as part of the initial load info */
2879 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
2880 spa->spa_load_info);
2881
2882 /* Restore the initial load info */
2883 fnvlist_free(spa->spa_load_info);
2884 spa->spa_load_info = loadinfo;
2885
2886 return (load_error);
2887 }
34dc7c2f
BB
2888}
2889
2890/*
2891 * Pool Open/Import
2892 *
2893 * The import case is identical to an open except that the configuration is sent
2894 * down from userland, instead of grabbed from the configuration cache. For the
2895 * case of an open, the pool configuration will exist in the
2896 * POOL_STATE_UNINITIALIZED state.
2897 *
2898 * The stats information (gen/count/ustats) is used to gather vdev statistics at
2899 * the same time open the pool, without having to keep around the spa_t in some
2900 * ambiguous state.
2901 */
2902static int
428870ff
BB
2903spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2904 nvlist_t **config)
34dc7c2f
BB
2905{
2906 spa_t *spa;
572e2857 2907 spa_load_state_t state = SPA_LOAD_OPEN;
34dc7c2f 2908 int error;
34dc7c2f 2909 int locked = B_FALSE;
526af785 2910 int firstopen = B_FALSE;
34dc7c2f
BB
2911
2912 *spapp = NULL;
2913
2914 /*
2915 * As disgusting as this is, we need to support recursive calls to this
2916 * function because dsl_dir_open() is called during spa_load(), and ends
2917 * up calling spa_open() again. The real fix is to figure out how to
2918 * avoid dsl_dir_open() calling this in the first place.
2919 */
2920 if (mutex_owner(&spa_namespace_lock) != curthread) {
2921 mutex_enter(&spa_namespace_lock);
2922 locked = B_TRUE;
2923 }
2924
2925 if ((spa = spa_lookup(pool)) == NULL) {
2926 if (locked)
2927 mutex_exit(&spa_namespace_lock);
2e528b49 2928 return (SET_ERROR(ENOENT));
34dc7c2f 2929 }
428870ff 2930
34dc7c2f 2931 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
428870ff
BB
2932 zpool_rewind_policy_t policy;
2933
526af785
PJD
2934 firstopen = B_TRUE;
2935
428870ff
BB
2936 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2937 &policy);
2938 if (policy.zrp_request & ZPOOL_DO_REWIND)
2939 state = SPA_LOAD_RECOVER;
34dc7c2f 2940
fb5f0bc8 2941 spa_activate(spa, spa_mode_global);
34dc7c2f 2942
428870ff
BB
2943 if (state != SPA_LOAD_RECOVER)
2944 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2945
2946 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
2947 policy.zrp_request);
34dc7c2f
BB
2948
2949 if (error == EBADF) {
2950 /*
2951 * If vdev_validate() returns failure (indicated by
2952 * EBADF), it indicates that one of the vdevs indicates
2953 * that the pool has been exported or destroyed. If
2954 * this is the case, the config cache is out of sync and
2955 * we should remove the pool from the namespace.
2956 */
34dc7c2f
BB
2957 spa_unload(spa);
2958 spa_deactivate(spa);
b128c09f 2959 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 2960 spa_remove(spa);
34dc7c2f
BB
2961 if (locked)
2962 mutex_exit(&spa_namespace_lock);
2e528b49 2963 return (SET_ERROR(ENOENT));
34dc7c2f
BB
2964 }
2965
2966 if (error) {
2967 /*
2968 * We can't open the pool, but we still have useful
2969 * information: the state of each vdev after the
2970 * attempted vdev_open(). Return this to the user.
2971 */
572e2857 2972 if (config != NULL && spa->spa_config) {
428870ff 2973 VERIFY(nvlist_dup(spa->spa_config, config,
b8d06fca 2974 KM_PUSHPAGE) == 0);
572e2857
BB
2975 VERIFY(nvlist_add_nvlist(*config,
2976 ZPOOL_CONFIG_LOAD_INFO,
2977 spa->spa_load_info) == 0);
2978 }
34dc7c2f
BB
2979 spa_unload(spa);
2980 spa_deactivate(spa);
428870ff 2981 spa->spa_last_open_failed = error;
34dc7c2f
BB
2982 if (locked)
2983 mutex_exit(&spa_namespace_lock);
2984 *spapp = NULL;
2985 return (error);
34dc7c2f 2986 }
34dc7c2f
BB
2987 }
2988
2989 spa_open_ref(spa, tag);
2990
b128c09f 2991 if (config != NULL)
34dc7c2f 2992 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
34dc7c2f 2993
572e2857
BB
2994 /*
2995 * If we've recovered the pool, pass back any information we
2996 * gathered while doing the load.
2997 */
2998 if (state == SPA_LOAD_RECOVER) {
2999 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3000 spa->spa_load_info) == 0);
3001 }
3002
428870ff
BB
3003 if (locked) {
3004 spa->spa_last_open_failed = 0;
3005 spa->spa_last_ubsync_txg = 0;
3006 spa->spa_load_txg = 0;
3007 mutex_exit(&spa_namespace_lock);
3008 }
3009
526af785
PJD
3010#ifdef _KERNEL
3011 if (firstopen)
3012 zvol_create_minors(spa->spa_name);
3013#endif
3014
428870ff
BB
3015 *spapp = spa;
3016
34dc7c2f
BB
3017 return (0);
3018}
3019
428870ff
BB
3020int
3021spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3022 nvlist_t **config)
3023{
3024 return (spa_open_common(name, spapp, tag, policy, config));
3025}
3026
34dc7c2f
BB
3027int
3028spa_open(const char *name, spa_t **spapp, void *tag)
3029{
428870ff 3030 return (spa_open_common(name, spapp, tag, NULL, NULL));
34dc7c2f
BB
3031}
3032
3033/*
3034 * Lookup the given spa_t, incrementing the inject count in the process,
3035 * preventing it from being exported or destroyed.
3036 */
3037spa_t *
3038spa_inject_addref(char *name)
3039{
3040 spa_t *spa;
3041
3042 mutex_enter(&spa_namespace_lock);
3043 if ((spa = spa_lookup(name)) == NULL) {
3044 mutex_exit(&spa_namespace_lock);
3045 return (NULL);
3046 }
3047 spa->spa_inject_ref++;
3048 mutex_exit(&spa_namespace_lock);
3049
3050 return (spa);
3051}
3052
3053void
3054spa_inject_delref(spa_t *spa)
3055{
3056 mutex_enter(&spa_namespace_lock);
3057 spa->spa_inject_ref--;
3058 mutex_exit(&spa_namespace_lock);
3059}
3060
3061/*
3062 * Add spares device information to the nvlist.
3063 */
3064static void
3065spa_add_spares(spa_t *spa, nvlist_t *config)
3066{
3067 nvlist_t **spares;
3068 uint_t i, nspares;
3069 nvlist_t *nvroot;
3070 uint64_t guid;
3071 vdev_stat_t *vs;
3072 uint_t vsc;
3073 uint64_t pool;
3074
9babb374
BB
3075 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3076
34dc7c2f
BB
3077 if (spa->spa_spares.sav_count == 0)
3078 return;
3079
3080 VERIFY(nvlist_lookup_nvlist(config,
3081 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3082 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3083 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3084 if (nspares != 0) {
3085 VERIFY(nvlist_add_nvlist_array(nvroot,
3086 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3087 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3088 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3089
3090 /*
3091 * Go through and find any spares which have since been
3092 * repurposed as an active spare. If this is the case, update
3093 * their status appropriately.
3094 */
3095 for (i = 0; i < nspares; i++) {
3096 VERIFY(nvlist_lookup_uint64(spares[i],
3097 ZPOOL_CONFIG_GUID, &guid) == 0);
b128c09f
BB
3098 if (spa_spare_exists(guid, &pool, NULL) &&
3099 pool != 0ULL) {
34dc7c2f 3100 VERIFY(nvlist_lookup_uint64_array(
428870ff 3101 spares[i], ZPOOL_CONFIG_VDEV_STATS,
34dc7c2f
BB
3102 (uint64_t **)&vs, &vsc) == 0);
3103 vs->vs_state = VDEV_STATE_CANT_OPEN;
3104 vs->vs_aux = VDEV_AUX_SPARED;
3105 }
3106 }
3107 }
3108}
3109
3110/*
3111 * Add l2cache device information to the nvlist, including vdev stats.
3112 */
3113static void
3114spa_add_l2cache(spa_t *spa, nvlist_t *config)
3115{
3116 nvlist_t **l2cache;
3117 uint_t i, j, nl2cache;
3118 nvlist_t *nvroot;
3119 uint64_t guid;
3120 vdev_t *vd;
3121 vdev_stat_t *vs;
3122 uint_t vsc;
3123
9babb374
BB
3124 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3125
34dc7c2f
BB
3126 if (spa->spa_l2cache.sav_count == 0)
3127 return;
3128
34dc7c2f
BB
3129 VERIFY(nvlist_lookup_nvlist(config,
3130 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3131 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3132 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3133 if (nl2cache != 0) {
3134 VERIFY(nvlist_add_nvlist_array(nvroot,
3135 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3136 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3137 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3138
3139 /*
3140 * Update level 2 cache device stats.
3141 */
3142
3143 for (i = 0; i < nl2cache; i++) {
3144 VERIFY(nvlist_lookup_uint64(l2cache[i],
3145 ZPOOL_CONFIG_GUID, &guid) == 0);
3146
3147 vd = NULL;
3148 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3149 if (guid ==
3150 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3151 vd = spa->spa_l2cache.sav_vdevs[j];
3152 break;
3153 }
3154 }
3155 ASSERT(vd != NULL);
3156
3157 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
428870ff
BB
3158 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3159 == 0);
34dc7c2f
BB
3160 vdev_get_stats(vd, vs);
3161 }
3162 }
34dc7c2f
BB
3163}
3164
9ae529ec
CS
3165static void
3166spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3167{
3168 nvlist_t *features;
3169 zap_cursor_t zc;
3170 zap_attribute_t za;
3171
3172 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3173 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3174
3175 if (spa->spa_feat_for_read_obj != 0) {
3176 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3177 spa->spa_feat_for_read_obj);
3178 zap_cursor_retrieve(&zc, &za) == 0;
3179 zap_cursor_advance(&zc)) {
3180 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3181 za.za_num_integers == 1);
3182 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3183 za.za_first_integer));
3184 }
3185 zap_cursor_fini(&zc);
3186 }
3187
3188 if (spa->spa_feat_for_write_obj != 0) {
3189 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3190 spa->spa_feat_for_write_obj);
3191 zap_cursor_retrieve(&zc, &za) == 0;
3192 zap_cursor_advance(&zc)) {
3193 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3194 za.za_num_integers == 1);
3195 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3196 za.za_first_integer));
3197 }
3198 zap_cursor_fini(&zc);
3199 }
3200
3201 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3202 features) == 0);
3203 nvlist_free(features);
3204}
3205
34dc7c2f 3206int
9ae529ec
CS
3207spa_get_stats(const char *name, nvlist_t **config,
3208 char *altroot, size_t buflen)
34dc7c2f
BB
3209{
3210 int error;
3211 spa_t *spa;
3212
3213 *config = NULL;
428870ff 3214 error = spa_open_common(name, &spa, FTAG, NULL, config);
34dc7c2f 3215
9babb374
BB
3216 if (spa != NULL) {
3217 /*
3218 * This still leaves a window of inconsistency where the spares
3219 * or l2cache devices could change and the config would be
3220 * self-inconsistent.
3221 */
3222 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f 3223
9babb374 3224 if (*config != NULL) {
572e2857
BB
3225 uint64_t loadtimes[2];
3226
3227 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3228 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3229 VERIFY(nvlist_add_uint64_array(*config,
3230 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3231
b128c09f 3232 VERIFY(nvlist_add_uint64(*config,
9babb374
BB
3233 ZPOOL_CONFIG_ERRCOUNT,
3234 spa_get_errlog_size(spa)) == 0);
3235
3236 if (spa_suspended(spa))
3237 VERIFY(nvlist_add_uint64(*config,
3238 ZPOOL_CONFIG_SUSPENDED,
3239 spa->spa_failmode) == 0);
b128c09f 3240
9babb374
BB
3241 spa_add_spares(spa, *config);
3242 spa_add_l2cache(spa, *config);
9ae529ec 3243 spa_add_feature_stats(spa, *config);
9babb374 3244 }
34dc7c2f
BB
3245 }
3246
3247 /*
3248 * We want to get the alternate root even for faulted pools, so we cheat
3249 * and call spa_lookup() directly.
3250 */
3251 if (altroot) {
3252 if (spa == NULL) {
3253 mutex_enter(&spa_namespace_lock);
3254 spa = spa_lookup(name);
3255 if (spa)
3256 spa_altroot(spa, altroot, buflen);
3257 else
3258 altroot[0] = '\0';
3259 spa = NULL;
3260 mutex_exit(&spa_namespace_lock);
3261 } else {
3262 spa_altroot(spa, altroot, buflen);
3263 }
3264 }
3265
9babb374
BB
3266 if (spa != NULL) {
3267 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f 3268 spa_close(spa, FTAG);
9babb374 3269 }
34dc7c2f
BB
3270
3271 return (error);
3272}
3273
3274/*
3275 * Validate that the auxiliary device array is well formed. We must have an
3276 * array of nvlists, each which describes a valid leaf vdev. If this is an
3277 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3278 * specified, as long as they are well-formed.
3279 */
3280static int
3281spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3282 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3283 vdev_labeltype_t label)
3284{
3285 nvlist_t **dev;
3286 uint_t i, ndev;
3287 vdev_t *vd;
3288 int error;
3289
b128c09f
BB
3290 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3291
34dc7c2f
BB
3292 /*
3293 * It's acceptable to have no devs specified.
3294 */
3295 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3296 return (0);
3297
3298 if (ndev == 0)
2e528b49 3299 return (SET_ERROR(EINVAL));
34dc7c2f
BB
3300
3301 /*
3302 * Make sure the pool is formatted with a version that supports this
3303 * device type.
3304 */
3305 if (spa_version(spa) < version)
2e528b49 3306 return (SET_ERROR(ENOTSUP));
34dc7c2f
BB
3307
3308 /*
3309 * Set the pending device list so we correctly handle device in-use
3310 * checking.
3311 */
3312 sav->sav_pending = dev;
3313 sav->sav_npending = ndev;
3314
3315 for (i = 0; i < ndev; i++) {
3316 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3317 mode)) != 0)
3318 goto out;
3319
3320 if (!vd->vdev_ops->vdev_op_leaf) {
3321 vdev_free(vd);
2e528b49 3322 error = SET_ERROR(EINVAL);
34dc7c2f
BB
3323 goto out;
3324 }
3325
3326 /*
b128c09f
BB
3327 * The L2ARC currently only supports disk devices in
3328 * kernel context. For user-level testing, we allow it.
34dc7c2f 3329 */
b128c09f 3330#ifdef _KERNEL
34dc7c2f
BB
3331 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3332 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
2e528b49 3333 error = SET_ERROR(ENOTBLK);
5ffb9d1d 3334 vdev_free(vd);
34dc7c2f
BB
3335 goto out;
3336 }
b128c09f 3337#endif
34dc7c2f
BB
3338 vd->vdev_top = vd;
3339
3340 if ((error = vdev_open(vd)) == 0 &&
3341 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3342 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3343 vd->vdev_guid) == 0);
3344 }
3345
3346 vdev_free(vd);
3347
3348 if (error &&
3349 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3350 goto out;
3351 else
3352 error = 0;
3353 }
3354
3355out:
3356 sav->sav_pending = NULL;
3357 sav->sav_npending = 0;
3358 return (error);
3359}
3360
3361static int
3362spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3363{
3364 int error;
3365
b128c09f
BB
3366 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3367
34dc7c2f
BB
3368 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3369 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3370 VDEV_LABEL_SPARE)) != 0) {
3371 return (error);
3372 }
3373
3374 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3375 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3376 VDEV_LABEL_L2CACHE));
3377}
3378
3379static void
3380spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3381 const char *config)
3382{
3383 int i;
3384
3385 if (sav->sav_config != NULL) {
3386 nvlist_t **olddevs;
3387 uint_t oldndevs;
3388 nvlist_t **newdevs;
3389
3390 /*
3391 * Generate new dev list by concatentating with the
3392 * current dev list.
3393 */
3394 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3395 &olddevs, &oldndevs) == 0);
3396
3397 newdevs = kmem_alloc(sizeof (void *) *
b8d06fca 3398 (ndevs + oldndevs), KM_PUSHPAGE);
34dc7c2f
BB
3399 for (i = 0; i < oldndevs; i++)
3400 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
b8d06fca 3401 KM_PUSHPAGE) == 0);
34dc7c2f
BB
3402 for (i = 0; i < ndevs; i++)
3403 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
b8d06fca 3404 KM_PUSHPAGE) == 0);
34dc7c2f
BB
3405
3406 VERIFY(nvlist_remove(sav->sav_config, config,
3407 DATA_TYPE_NVLIST_ARRAY) == 0);
3408
3409 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3410 config, newdevs, ndevs + oldndevs) == 0);
3411 for (i = 0; i < oldndevs + ndevs; i++)
3412 nvlist_free(newdevs[i]);
3413 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3414 } else {
3415 /*
3416 * Generate a new dev list.
3417 */
3418 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
b8d06fca 3419 KM_PUSHPAGE) == 0);
34dc7c2f
BB
3420 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3421 devs, ndevs) == 0);
3422 }
3423}
3424
3425/*
3426 * Stop and drop level 2 ARC devices
3427 */
3428void
3429spa_l2cache_drop(spa_t *spa)
3430{
3431 vdev_t *vd;
3432 int i;
3433 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3434
3435 for (i = 0; i < sav->sav_count; i++) {
3436 uint64_t pool;
3437
3438 vd = sav->sav_vdevs[i];
3439 ASSERT(vd != NULL);
3440
fb5f0bc8
BB
3441 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3442 pool != 0ULL && l2arc_vdev_present(vd))
34dc7c2f 3443 l2arc_remove_vdev(vd);
34dc7c2f
BB
3444 }
3445}
3446
3447/*
3448 * Pool Creation
3449 */
3450int
3451spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
6f1ffb06 3452 nvlist_t *zplprops)
34dc7c2f
BB
3453{
3454 spa_t *spa;
3455 char *altroot = NULL;
3456 vdev_t *rvd;
3457 dsl_pool_t *dp;
3458 dmu_tx_t *tx;
9babb374 3459 int error = 0;
34dc7c2f
BB
3460 uint64_t txg = TXG_INITIAL;
3461 nvlist_t **spares, **l2cache;
3462 uint_t nspares, nl2cache;
428870ff 3463 uint64_t version, obj;
9ae529ec
CS
3464 boolean_t has_features;
3465 nvpair_t *elem;
d6320ddb 3466 int c;
34dc7c2f
BB
3467
3468 /*
3469 * If this pool already exists, return failure.
3470 */
3471 mutex_enter(&spa_namespace_lock);
3472 if (spa_lookup(pool) != NULL) {
3473 mutex_exit(&spa_namespace_lock);
2e528b49 3474 return (SET_ERROR(EEXIST));
34dc7c2f
BB
3475 }
3476
3477 /*
3478 * Allocate a new spa_t structure.
3479 */
3480 (void) nvlist_lookup_string(props,
3481 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
428870ff 3482 spa = spa_add(pool, NULL, altroot);
fb5f0bc8 3483 spa_activate(spa, spa_mode_global);
34dc7c2f 3484
34dc7c2f 3485 if (props && (error = spa_prop_validate(spa, props))) {
34dc7c2f
BB
3486 spa_deactivate(spa);
3487 spa_remove(spa);
b128c09f 3488 mutex_exit(&spa_namespace_lock);
34dc7c2f
BB
3489 return (error);
3490 }
3491
9ae529ec
CS
3492 has_features = B_FALSE;
3493 for (elem = nvlist_next_nvpair(props, NULL);
3494 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3495 if (zpool_prop_feature(nvpair_name(elem)))
3496 has_features = B_TRUE;
3497 }
3498
3499 if (has_features || nvlist_lookup_uint64(props,
3500 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
34dc7c2f 3501 version = SPA_VERSION;
9ae529ec
CS
3502 }
3503 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
428870ff
BB
3504
3505 spa->spa_first_txg = txg;
3506 spa->spa_uberblock.ub_txg = txg - 1;
34dc7c2f
BB
3507 spa->spa_uberblock.ub_version = version;
3508 spa->spa_ubsync = spa->spa_uberblock;
3509
9babb374
BB
3510 /*
3511 * Create "The Godfather" zio to hold all async IOs
3512 */
3513 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
3514 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
3515
34dc7c2f
BB
3516 /*
3517 * Create the root vdev.
3518 */
b128c09f 3519 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
3520
3521 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3522
3523 ASSERT(error != 0 || rvd != NULL);
3524 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3525
3526 if (error == 0 && !zfs_allocatable_devs(nvroot))
2e528b49 3527 error = SET_ERROR(EINVAL);
34dc7c2f
BB
3528
3529 if (error == 0 &&
3530 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3531 (error = spa_validate_aux(spa, nvroot, txg,
3532 VDEV_ALLOC_ADD)) == 0) {
d6320ddb 3533 for (c = 0; c < rvd->vdev_children; c++) {
9babb374
BB
3534 vdev_metaslab_set_size(rvd->vdev_child[c]);
3535 vdev_expand(rvd->vdev_child[c], txg);
3536 }
34dc7c2f
BB
3537 }
3538
b128c09f 3539 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3540
3541 if (error != 0) {
3542 spa_unload(spa);
3543 spa_deactivate(spa);
3544 spa_remove(spa);
3545 mutex_exit(&spa_namespace_lock);
3546 return (error);
3547 }
3548
3549 /*
3550 * Get the list of spares, if specified.
3551 */
3552 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3553 &spares, &nspares) == 0) {
3554 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
b8d06fca 3555 KM_PUSHPAGE) == 0);
34dc7c2f
BB
3556 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3557 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 3558 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3559 spa_load_spares(spa);
b128c09f 3560 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3561 spa->spa_spares.sav_sync = B_TRUE;
3562 }
3563
3564 /*
3565 * Get the list of level 2 cache devices, if specified.
3566 */
3567 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3568 &l2cache, &nl2cache) == 0) {
3569 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
b8d06fca 3570 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
3571 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3572 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 3573 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3574 spa_load_l2cache(spa);
b128c09f 3575 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3576 spa->spa_l2cache.sav_sync = B_TRUE;
3577 }
3578
9ae529ec 3579 spa->spa_is_initializing = B_TRUE;
b128c09f 3580 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
34dc7c2f 3581 spa->spa_meta_objset = dp->dp_meta_objset;
9ae529ec 3582 spa->spa_is_initializing = B_FALSE;
34dc7c2f 3583
428870ff
BB
3584 /*
3585 * Create DDTs (dedup tables).
3586 */
3587 ddt_create(spa);
3588
3589 spa_update_dspace(spa);
3590
34dc7c2f
BB
3591 tx = dmu_tx_create_assigned(dp, txg);
3592
3593 /*
3594 * Create the pool config object.
3595 */
3596 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
b128c09f 3597 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
34dc7c2f
BB
3598 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3599
3600 if (zap_add(spa->spa_meta_objset,
3601 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3602 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3603 cmn_err(CE_PANIC, "failed to add pool config");
3604 }
3605
9ae529ec
CS
3606 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3607 spa_feature_create_zap_objects(spa, tx);
3608
428870ff
BB
3609 if (zap_add(spa->spa_meta_objset,
3610 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3611 sizeof (uint64_t), 1, &version, tx) != 0) {
3612 cmn_err(CE_PANIC, "failed to add pool version");
3613 }
3614
34dc7c2f
BB
3615 /* Newly created pools with the right version are always deflated. */
3616 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3617 spa->spa_deflate = TRUE;
3618 if (zap_add(spa->spa_meta_objset,
3619 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3620 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3621 cmn_err(CE_PANIC, "failed to add deflate");
3622 }
3623 }
3624
3625 /*
428870ff 3626 * Create the deferred-free bpobj. Turn off compression
34dc7c2f
BB
3627 * because sync-to-convergence takes longer if the blocksize
3628 * keeps changing.
3629 */
428870ff
BB
3630 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3631 dmu_object_set_compress(spa->spa_meta_objset, obj,
34dc7c2f 3632 ZIO_COMPRESS_OFF, tx);
34dc7c2f 3633 if (zap_add(spa->spa_meta_objset,
428870ff
BB
3634 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3635 sizeof (uint64_t), 1, &obj, tx) != 0) {
3636 cmn_err(CE_PANIC, "failed to add bpobj");
34dc7c2f 3637 }
428870ff
BB
3638 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3639 spa->spa_meta_objset, obj));
34dc7c2f
BB
3640
3641 /*
3642 * Create the pool's history object.
3643 */
3644 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3645 spa_history_create_obj(spa, tx);
3646
3647 /*
3648 * Set pool properties.
3649 */
3650 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3651 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3652 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
9babb374 3653 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
428870ff 3654
d164b209
BB
3655 if (props != NULL) {
3656 spa_configfile_set(spa, props, B_FALSE);
13fe0198 3657 spa_sync_props(props, tx);
d164b209 3658 }
34dc7c2f
BB
3659
3660 dmu_tx_commit(tx);
3661
3662 spa->spa_sync_on = B_TRUE;
3663 txg_sync_start(spa->spa_dsl_pool);
3664
3665 /*
3666 * We explicitly wait for the first transaction to complete so that our
3667 * bean counters are appropriately updated.
3668 */
3669 txg_wait_synced(spa->spa_dsl_pool, txg);
3670
b128c09f 3671 spa_config_sync(spa, B_FALSE, B_TRUE);
34dc7c2f 3672
6f1ffb06 3673 spa_history_log_version(spa, "create");
34dc7c2f 3674
b128c09f
BB
3675 spa->spa_minref = refcount_count(&spa->spa_refcount);
3676
d164b209
BB
3677 mutex_exit(&spa_namespace_lock);
3678
34dc7c2f
BB
3679 return (0);
3680}
3681
9babb374 3682#ifdef _KERNEL
34dc7c2f 3683/*
9babb374
BB
3684 * Get the root pool information from the root disk, then import the root pool
3685 * during the system boot up time.
34dc7c2f 3686 */
9babb374
BB
3687extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3688
3689static nvlist_t *
3690spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3691{
3692 nvlist_t *config;
3693 nvlist_t *nvtop, *nvroot;
3694 uint64_t pgid;
3695
3696 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3697 return (NULL);
3698
3699 /*
3700 * Add this top-level vdev to the child array.
3701 */
3702 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3703 &nvtop) == 0);
3704 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3705 &pgid) == 0);
3706 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3707
3708 /*
3709 * Put this pool's top-level vdevs into a root vdev.
3710 */
b8d06fca 3711 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
9babb374
BB
3712 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3713 VDEV_TYPE_ROOT) == 0);
3714 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3715 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3716 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3717 &nvtop, 1) == 0);
3718
3719 /*
3720 * Replace the existing vdev_tree with the new root vdev in
3721 * this pool's configuration (remove the old, add the new).
3722 */
3723 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3724 nvlist_free(nvroot);
3725 return (config);
3726}
3727
3728/*
3729 * Walk the vdev tree and see if we can find a device with "better"
3730 * configuration. A configuration is "better" if the label on that
3731 * device has a more recent txg.
3732 */
3733static void
3734spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3735{
d6320ddb
BB
3736 int c;
3737
3738 for (c = 0; c < vd->vdev_children; c++)
9babb374
BB
3739 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3740
3741 if (vd->vdev_ops->vdev_op_leaf) {
3742 nvlist_t *label;
3743 uint64_t label_txg;
3744
3745 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3746 &label) != 0)
3747 return;
3748
3749 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3750 &label_txg) == 0);
3751
3752 /*
3753 * Do we have a better boot device?
3754 */
3755 if (label_txg > *txg) {
3756 *txg = label_txg;
3757 *avd = vd;
3758 }
3759 nvlist_free(label);
3760 }
3761}
3762
3763/*
3764 * Import a root pool.
3765 *
3766 * For x86. devpath_list will consist of devid and/or physpath name of
3767 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3768 * The GRUB "findroot" command will return the vdev we should boot.
3769 *
3770 * For Sparc, devpath_list consists the physpath name of the booting device
3771 * no matter the rootpool is a single device pool or a mirrored pool.
3772 * e.g.
3773 * "/pci@1f,0/ide@d/disk@0,0:a"
3774 */
3775int
3776spa_import_rootpool(char *devpath, char *devid)
3777{
3778 spa_t *spa;
3779 vdev_t *rvd, *bvd, *avd = NULL;
3780 nvlist_t *config, *nvtop;
3781 uint64_t guid, txg;
3782 char *pname;
3783 int error;
3784
3785 /*
3786 * Read the label from the boot device and generate a configuration.
3787 */
428870ff
BB
3788 config = spa_generate_rootconf(devpath, devid, &guid);
3789#if defined(_OBP) && defined(_KERNEL)
3790 if (config == NULL) {
3791 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3792 /* iscsi boot */
3793 get_iscsi_bootpath_phy(devpath);
3794 config = spa_generate_rootconf(devpath, devid, &guid);
3795 }
3796 }
3797#endif
3798 if (config == NULL) {
9ae529ec 3799 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
9babb374 3800 devpath);
2e528b49 3801 return (SET_ERROR(EIO));
9babb374
BB
3802 }
3803
3804 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3805 &pname) == 0);
3806 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3807
3808 mutex_enter(&spa_namespace_lock);
3809 if ((spa = spa_lookup(pname)) != NULL) {
3810 /*
3811 * Remove the existing root pool from the namespace so that we
3812 * can replace it with the correct config we just read in.
3813 */
3814 spa_remove(spa);
3815 }
3816
428870ff 3817 spa = spa_add(pname, config, NULL);
9babb374 3818 spa->spa_is_root = B_TRUE;
572e2857 3819 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
9babb374
BB
3820
3821 /*
3822 * Build up a vdev tree based on the boot device's label config.
3823 */
3824 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3825 &nvtop) == 0);
3826 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3827 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3828 VDEV_ALLOC_ROOTPOOL);
3829 spa_config_exit(spa, SCL_ALL, FTAG);
3830 if (error) {
3831 mutex_exit(&spa_namespace_lock);
3832 nvlist_free(config);
3833 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3834 pname);
3835 return (error);
3836 }
3837
3838 /*
3839 * Get the boot vdev.
3840 */
3841 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3842 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3843 (u_longlong_t)guid);
2e528b49 3844 error = SET_ERROR(ENOENT);
9babb374
BB
3845 goto out;
3846 }
3847
3848 /*
3849 * Determine if there is a better boot device.
3850 */
3851 avd = bvd;
3852 spa_alt_rootvdev(rvd, &avd, &txg);
3853 if (avd != bvd) {
3854 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3855 "try booting from '%s'", avd->vdev_path);
2e528b49 3856 error = SET_ERROR(EINVAL);
9babb374
BB
3857 goto out;
3858 }
3859
3860 /*
3861 * If the boot device is part of a spare vdev then ensure that
3862 * we're booting off the active spare.
3863 */
3864 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3865 !bvd->vdev_isspare) {
3866 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3867 "try booting from '%s'",
572e2857
BB
3868 bvd->vdev_parent->
3869 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
2e528b49 3870 error = SET_ERROR(EINVAL);
9babb374
BB
3871 goto out;
3872 }
3873
9babb374
BB
3874 error = 0;
3875out:
3876 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3877 vdev_free(rvd);
3878 spa_config_exit(spa, SCL_ALL, FTAG);
3879 mutex_exit(&spa_namespace_lock);
3880
3881 nvlist_free(config);
3882 return (error);
3883}
3884
3885#endif
3886
9babb374
BB
3887/*
3888 * Import a non-root pool into the system.
3889 */
3890int
13fe0198 3891spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
34dc7c2f
BB
3892{
3893 spa_t *spa;
3894 char *altroot = NULL;
428870ff
BB
3895 spa_load_state_t state = SPA_LOAD_IMPORT;
3896 zpool_rewind_policy_t policy;
572e2857
BB
3897 uint64_t mode = spa_mode_global;
3898 uint64_t readonly = B_FALSE;
9babb374 3899 int error;
34dc7c2f
BB
3900 nvlist_t *nvroot;
3901 nvlist_t **spares, **l2cache;
3902 uint_t nspares, nl2cache;
34dc7c2f
BB
3903
3904 /*
3905 * If a pool with this name exists, return failure.
3906 */
3907 mutex_enter(&spa_namespace_lock);
428870ff 3908 if (spa_lookup(pool) != NULL) {
9babb374 3909 mutex_exit(&spa_namespace_lock);
2e528b49 3910 return (SET_ERROR(EEXIST));
34dc7c2f
BB
3911 }
3912
3913 /*
3914 * Create and initialize the spa structure.
3915 */
3916 (void) nvlist_lookup_string(props,
3917 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
572e2857
BB
3918 (void) nvlist_lookup_uint64(props,
3919 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3920 if (readonly)
3921 mode = FREAD;
428870ff 3922 spa = spa_add(pool, config, altroot);
572e2857
BB
3923 spa->spa_import_flags = flags;
3924
3925 /*
3926 * Verbatim import - Take a pool and insert it into the namespace
3927 * as if it had been loaded at boot.
3928 */
3929 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3930 if (props != NULL)
3931 spa_configfile_set(spa, props, B_FALSE);
3932
3933 spa_config_sync(spa, B_FALSE, B_TRUE);
3934
3935 mutex_exit(&spa_namespace_lock);
572e2857
BB
3936 return (0);
3937 }
3938
3939 spa_activate(spa, mode);
34dc7c2f 3940
9babb374
BB
3941 /*
3942 * Don't start async tasks until we know everything is healthy.
3943 */
3944 spa_async_suspend(spa);
b128c09f 3945
572e2857
BB
3946 zpool_get_rewind_policy(config, &policy);
3947 if (policy.zrp_request & ZPOOL_DO_REWIND)
3948 state = SPA_LOAD_RECOVER;
3949
34dc7c2f 3950 /*
9babb374
BB
3951 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
3952 * because the user-supplied config is actually the one to trust when
b128c09f 3953 * doing an import.
34dc7c2f 3954 */
428870ff
BB
3955 if (state != SPA_LOAD_RECOVER)
3956 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
572e2857 3957
428870ff
BB
3958 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3959 policy.zrp_request);
3960
3961 /*
572e2857
BB
3962 * Propagate anything learned while loading the pool and pass it
3963 * back to caller (i.e. rewind info, missing devices, etc).
428870ff 3964 */
572e2857
BB
3965 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3966 spa->spa_load_info) == 0);
34dc7c2f 3967
b128c09f 3968 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3969 /*
9babb374
BB
3970 * Toss any existing sparelist, as it doesn't have any validity
3971 * anymore, and conflicts with spa_has_spare().
34dc7c2f 3972 */
9babb374 3973 if (spa->spa_spares.sav_config) {
34dc7c2f
BB
3974 nvlist_free(spa->spa_spares.sav_config);
3975 spa->spa_spares.sav_config = NULL;
3976 spa_load_spares(spa);
3977 }
9babb374 3978 if (spa->spa_l2cache.sav_config) {
34dc7c2f
BB
3979 nvlist_free(spa->spa_l2cache.sav_config);
3980 spa->spa_l2cache.sav_config = NULL;
3981 spa_load_l2cache(spa);
3982 }
3983
3984 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3985 &nvroot) == 0);
3986 if (error == 0)
9babb374
BB
3987 error = spa_validate_aux(spa, nvroot, -1ULL,
3988 VDEV_ALLOC_SPARE);
34dc7c2f
BB
3989 if (error == 0)
3990 error = spa_validate_aux(spa, nvroot, -1ULL,
3991 VDEV_ALLOC_L2CACHE);
b128c09f 3992 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 3993
d164b209
BB
3994 if (props != NULL)
3995 spa_configfile_set(spa, props, B_FALSE);
3996
fb5f0bc8
BB
3997 if (error != 0 || (props && spa_writeable(spa) &&
3998 (error = spa_prop_set(spa, props)))) {
9babb374
BB
3999 spa_unload(spa);
4000 spa_deactivate(spa);
4001 spa_remove(spa);
34dc7c2f
BB
4002 mutex_exit(&spa_namespace_lock);
4003 return (error);
4004 }
4005
572e2857
BB
4006 spa_async_resume(spa);
4007
34dc7c2f
BB
4008 /*
4009 * Override any spares and level 2 cache devices as specified by
4010 * the user, as these may have correct device names/devids, etc.
4011 */
4012 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4013 &spares, &nspares) == 0) {
4014 if (spa->spa_spares.sav_config)
4015 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4016 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4017 else
4018 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
b8d06fca 4019 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
4020 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4021 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 4022 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4023 spa_load_spares(spa);
b128c09f 4024 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
4025 spa->spa_spares.sav_sync = B_TRUE;
4026 }
4027 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4028 &l2cache, &nl2cache) == 0) {
4029 if (spa->spa_l2cache.sav_config)
4030 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4031 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4032 else
4033 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
b8d06fca 4034 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
4035 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4036 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 4037 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4038 spa_load_l2cache(spa);
b128c09f 4039 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
4040 spa->spa_l2cache.sav_sync = B_TRUE;
4041 }
4042
428870ff
BB
4043 /*
4044 * Check for any removed devices.
4045 */
4046 if (spa->spa_autoreplace) {
4047 spa_aux_check_removed(&spa->spa_spares);
4048 spa_aux_check_removed(&spa->spa_l2cache);
4049 }
4050
fb5f0bc8 4051 if (spa_writeable(spa)) {
b128c09f
BB
4052 /*
4053 * Update the config cache to include the newly-imported pool.
4054 */
45d1cae3 4055 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
b128c09f 4056 }
34dc7c2f 4057
34dc7c2f 4058 /*
9babb374
BB
4059 * It's possible that the pool was expanded while it was exported.
4060 * We kick off an async task to handle this for us.
34dc7c2f 4061 */
9babb374 4062 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
b128c09f 4063
9babb374 4064 mutex_exit(&spa_namespace_lock);
6f1ffb06 4065 spa_history_log_version(spa, "import");
b128c09f 4066
526af785
PJD
4067#ifdef _KERNEL
4068 zvol_create_minors(pool);
4069#endif
4070
b128c09f
BB
4071 return (0);
4072}
4073
34dc7c2f
BB
4074nvlist_t *
4075spa_tryimport(nvlist_t *tryconfig)
4076{
4077 nvlist_t *config = NULL;
4078 char *poolname;
4079 spa_t *spa;
4080 uint64_t state;
d164b209 4081 int error;
34dc7c2f
BB
4082
4083 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4084 return (NULL);
4085
4086 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4087 return (NULL);
4088
4089 /*
4090 * Create and initialize the spa structure.
4091 */
4092 mutex_enter(&spa_namespace_lock);
428870ff 4093 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
fb5f0bc8 4094 spa_activate(spa, FREAD);
34dc7c2f
BB
4095
4096 /*
4097 * Pass off the heavy lifting to spa_load().
4098 * Pass TRUE for mosconfig because the user-supplied config
4099 * is actually the one to trust when doing an import.
4100 */
428870ff 4101 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
34dc7c2f
BB
4102
4103 /*
4104 * If 'tryconfig' was at least parsable, return the current config.
4105 */
4106 if (spa->spa_root_vdev != NULL) {
34dc7c2f 4107 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
34dc7c2f
BB
4108 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4109 poolname) == 0);
4110 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4111 state) == 0);
4112 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4113 spa->spa_uberblock.ub_timestamp) == 0);
9ae529ec
CS
4114 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4115 spa->spa_load_info) == 0);
ffe9d382
BB
4116 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
4117 spa->spa_errata) == 0);
34dc7c2f
BB
4118
4119 /*
4120 * If the bootfs property exists on this pool then we
4121 * copy it out so that external consumers can tell which
4122 * pools are bootable.
4123 */
d164b209 4124 if ((!error || error == EEXIST) && spa->spa_bootfs) {
b8d06fca 4125 char *tmpname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
34dc7c2f
BB
4126
4127 /*
4128 * We have to play games with the name since the
4129 * pool was opened as TRYIMPORT_NAME.
4130 */
b128c09f 4131 if (dsl_dsobj_to_dsname(spa_name(spa),
34dc7c2f
BB
4132 spa->spa_bootfs, tmpname) == 0) {
4133 char *cp;
d1d7e268
MK
4134 char *dsname;
4135
4136 dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
34dc7c2f
BB
4137
4138 cp = strchr(tmpname, '/');
4139 if (cp == NULL) {
4140 (void) strlcpy(dsname, tmpname,
4141 MAXPATHLEN);
4142 } else {
4143 (void) snprintf(dsname, MAXPATHLEN,
4144 "%s/%s", poolname, ++cp);
4145 }
4146 VERIFY(nvlist_add_string(config,
4147 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4148 kmem_free(dsname, MAXPATHLEN);
4149 }
4150 kmem_free(tmpname, MAXPATHLEN);
4151 }
4152
4153 /*
4154 * Add the list of hot spares and level 2 cache devices.
4155 */
9babb374 4156 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f
BB
4157 spa_add_spares(spa, config);
4158 spa_add_l2cache(spa, config);
9babb374 4159 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f
BB
4160 }
4161
4162 spa_unload(spa);
4163 spa_deactivate(spa);
4164 spa_remove(spa);
4165 mutex_exit(&spa_namespace_lock);
4166
4167 return (config);
4168}
4169
4170/*
4171 * Pool export/destroy
4172 *
4173 * The act of destroying or exporting a pool is very simple. We make sure there
4174 * is no more pending I/O and any references to the pool are gone. Then, we
4175 * update the pool state and sync all the labels to disk, removing the
fb5f0bc8
BB
4176 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4177 * we don't sync the labels or remove the configuration cache.
34dc7c2f
BB
4178 */
4179static int
b128c09f 4180spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
fb5f0bc8 4181 boolean_t force, boolean_t hardforce)
34dc7c2f
BB
4182{
4183 spa_t *spa;
4184
4185 if (oldconfig)
4186 *oldconfig = NULL;
4187
fb5f0bc8 4188 if (!(spa_mode_global & FWRITE))
2e528b49 4189 return (SET_ERROR(EROFS));
34dc7c2f
BB
4190
4191 mutex_enter(&spa_namespace_lock);
4192 if ((spa = spa_lookup(pool)) == NULL) {
4193 mutex_exit(&spa_namespace_lock);
2e528b49 4194 return (SET_ERROR(ENOENT));
34dc7c2f
BB
4195 }
4196
4197 /*
4198 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4199 * reacquire the namespace lock, and see if we can export.
4200 */
4201 spa_open_ref(spa, FTAG);
4202 mutex_exit(&spa_namespace_lock);
4203 spa_async_suspend(spa);
4204 mutex_enter(&spa_namespace_lock);
4205 spa_close(spa, FTAG);
4206
4207 /*
4208 * The pool will be in core if it's openable,
4209 * in which case we can modify its state.
4210 */
4211 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4212 /*
4213 * Objsets may be open only because they're dirty, so we
4214 * have to force it to sync before checking spa_refcnt.
4215 */
34dc7c2f
BB
4216 txg_wait_synced(spa->spa_dsl_pool, 0);
4217
4218 /*
4219 * A pool cannot be exported or destroyed if there are active
4220 * references. If we are resetting a pool, allow references by
4221 * fault injection handlers.
4222 */
4223 if (!spa_refcount_zero(spa) ||
4224 (spa->spa_inject_ref != 0 &&
4225 new_state != POOL_STATE_UNINITIALIZED)) {
34dc7c2f
BB
4226 spa_async_resume(spa);
4227 mutex_exit(&spa_namespace_lock);
2e528b49 4228 return (SET_ERROR(EBUSY));
34dc7c2f
BB
4229 }
4230
b128c09f
BB
4231 /*
4232 * A pool cannot be exported if it has an active shared spare.
4233 * This is to prevent other pools stealing the active spare
4234 * from an exported pool. At user's own will, such pool can
4235 * be forcedly exported.
4236 */
4237 if (!force && new_state == POOL_STATE_EXPORTED &&
4238 spa_has_active_shared_spare(spa)) {
4239 spa_async_resume(spa);
4240 mutex_exit(&spa_namespace_lock);
2e528b49 4241 return (SET_ERROR(EXDEV));
b128c09f 4242 }
34dc7c2f
BB
4243
4244 /*
4245 * We want this to be reflected on every label,
4246 * so mark them all dirty. spa_unload() will do the
4247 * final sync that pushes these changes out.
4248 */
fb5f0bc8 4249 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
b128c09f 4250 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4251 spa->spa_state = new_state;
428870ff
BB
4252 spa->spa_final_txg = spa_last_synced_txg(spa) +
4253 TXG_DEFER_SIZE + 1;
34dc7c2f 4254 vdev_config_dirty(spa->spa_root_vdev);
b128c09f 4255 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
4256 }
4257 }
4258
26685276 4259 spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_DESTROY);
34dc7c2f
BB
4260
4261 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4262 spa_unload(spa);
4263 spa_deactivate(spa);
4264 }
4265
4266 if (oldconfig && spa->spa_config)
4267 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4268
4269 if (new_state != POOL_STATE_UNINITIALIZED) {
fb5f0bc8
BB
4270 if (!hardforce)
4271 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 4272 spa_remove(spa);
34dc7c2f
BB
4273 }
4274 mutex_exit(&spa_namespace_lock);
4275
4276 return (0);
4277}
4278
4279/*
4280 * Destroy a storage pool.
4281 */
4282int
4283spa_destroy(char *pool)
4284{
fb5f0bc8
BB
4285 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4286 B_FALSE, B_FALSE));
34dc7c2f
BB
4287}
4288
4289/*
4290 * Export a storage pool.
4291 */
4292int
fb5f0bc8
BB
4293spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4294 boolean_t hardforce)
34dc7c2f 4295{
fb5f0bc8
BB
4296 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4297 force, hardforce));
34dc7c2f
BB
4298}
4299
4300/*
4301 * Similar to spa_export(), this unloads the spa_t without actually removing it
4302 * from the namespace in any way.
4303 */
4304int
4305spa_reset(char *pool)
4306{
b128c09f 4307 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
fb5f0bc8 4308 B_FALSE, B_FALSE));
34dc7c2f
BB
4309}
4310
34dc7c2f
BB
4311/*
4312 * ==========================================================================
4313 * Device manipulation
4314 * ==========================================================================
4315 */
4316
4317/*
4318 * Add a device to a storage pool.
4319 */
4320int
4321spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4322{
428870ff 4323 uint64_t txg, id;
fb5f0bc8 4324 int error;
34dc7c2f
BB
4325 vdev_t *rvd = spa->spa_root_vdev;
4326 vdev_t *vd, *tvd;
4327 nvlist_t **spares, **l2cache;
4328 uint_t nspares, nl2cache;
d6320ddb 4329 int c;
34dc7c2f 4330
572e2857
BB
4331 ASSERT(spa_writeable(spa));
4332
34dc7c2f
BB
4333 txg = spa_vdev_enter(spa);
4334
4335 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4336 VDEV_ALLOC_ADD)) != 0)
4337 return (spa_vdev_exit(spa, NULL, txg, error));
4338
b128c09f 4339 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
34dc7c2f
BB
4340
4341 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4342 &nspares) != 0)
4343 nspares = 0;
4344
4345 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4346 &nl2cache) != 0)
4347 nl2cache = 0;
4348
b128c09f 4349 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
34dc7c2f 4350 return (spa_vdev_exit(spa, vd, txg, EINVAL));
34dc7c2f 4351
b128c09f
BB
4352 if (vd->vdev_children != 0 &&
4353 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4354 return (spa_vdev_exit(spa, vd, txg, error));
34dc7c2f
BB
4355
4356 /*
4357 * We must validate the spares and l2cache devices after checking the
4358 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4359 */
b128c09f 4360 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
34dc7c2f 4361 return (spa_vdev_exit(spa, vd, txg, error));
34dc7c2f
BB
4362
4363 /*
4364 * Transfer each new top-level vdev from vd to rvd.
4365 */
d6320ddb 4366 for (c = 0; c < vd->vdev_children; c++) {
428870ff
BB
4367
4368 /*
4369 * Set the vdev id to the first hole, if one exists.
4370 */
4371 for (id = 0; id < rvd->vdev_children; id++) {
4372 if (rvd->vdev_child[id]->vdev_ishole) {
4373 vdev_free(rvd->vdev_child[id]);
4374 break;
4375 }
4376 }
34dc7c2f
BB
4377 tvd = vd->vdev_child[c];
4378 vdev_remove_child(vd, tvd);
428870ff 4379 tvd->vdev_id = id;
34dc7c2f
BB
4380 vdev_add_child(rvd, tvd);
4381 vdev_config_dirty(tvd);
4382 }
4383
4384 if (nspares != 0) {
4385 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4386 ZPOOL_CONFIG_SPARES);
4387 spa_load_spares(spa);
4388 spa->spa_spares.sav_sync = B_TRUE;
4389 }
4390
4391 if (nl2cache != 0) {
4392 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4393 ZPOOL_CONFIG_L2CACHE);
4394 spa_load_l2cache(spa);
4395 spa->spa_l2cache.sav_sync = B_TRUE;
4396 }
4397
4398 /*
4399 * We have to be careful when adding new vdevs to an existing pool.
4400 * If other threads start allocating from these vdevs before we
4401 * sync the config cache, and we lose power, then upon reboot we may
4402 * fail to open the pool because there are DVAs that the config cache
4403 * can't translate. Therefore, we first add the vdevs without
4404 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4405 * and then let spa_config_update() initialize the new metaslabs.
4406 *
4407 * spa_load() checks for added-but-not-initialized vdevs, so that
4408 * if we lose power at any point in this sequence, the remaining
4409 * steps will be completed the next time we load the pool.
4410 */
4411 (void) spa_vdev_exit(spa, vd, txg, 0);
4412
4413 mutex_enter(&spa_namespace_lock);
4414 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4415 mutex_exit(&spa_namespace_lock);
4416
4417 return (0);
4418}
4419
4420/*
4421 * Attach a device to a mirror. The arguments are the path to any device
4422 * in the mirror, and the nvroot for the new device. If the path specifies
4423 * a device that is not mirrored, we automatically insert the mirror vdev.
4424 *
4425 * If 'replacing' is specified, the new device is intended to replace the
4426 * existing device; in this case the two devices are made into their own
4427 * mirror using the 'replacing' vdev, which is functionally identical to
4428 * the mirror vdev (it actually reuses all the same ops) but has a few
4429 * extra rules: you can't attach to it after it's been created, and upon
4430 * completion of resilvering, the first disk (the one being replaced)
4431 * is automatically detached.
4432 */
4433int
4434spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4435{
428870ff 4436 uint64_t txg, dtl_max_txg;
34dc7c2f
BB
4437 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4438 vdev_ops_t *pvops;
b128c09f
BB
4439 char *oldvdpath, *newvdpath;
4440 int newvd_isspare;
4441 int error;
2e528b49 4442 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
34dc7c2f 4443
572e2857
BB
4444 ASSERT(spa_writeable(spa));
4445
34dc7c2f
BB
4446 txg = spa_vdev_enter(spa);
4447
b128c09f 4448 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
4449
4450 if (oldvd == NULL)
4451 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4452
4453 if (!oldvd->vdev_ops->vdev_op_leaf)
4454 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4455
4456 pvd = oldvd->vdev_parent;
4457
4458 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
5ffb9d1d 4459 VDEV_ALLOC_ATTACH)) != 0)
34dc7c2f
BB
4460 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4461
4462 if (newrootvd->vdev_children != 1)
4463 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4464
4465 newvd = newrootvd->vdev_child[0];
4466
4467 if (!newvd->vdev_ops->vdev_op_leaf)
4468 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4469
4470 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4471 return (spa_vdev_exit(spa, newrootvd, txg, error));
4472
4473 /*
4474 * Spares can't replace logs
4475 */
b128c09f 4476 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
34dc7c2f
BB
4477 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4478
4479 if (!replacing) {
4480 /*
4481 * For attach, the only allowable parent is a mirror or the root
4482 * vdev.
4483 */
4484 if (pvd->vdev_ops != &vdev_mirror_ops &&
4485 pvd->vdev_ops != &vdev_root_ops)
4486 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4487
4488 pvops = &vdev_mirror_ops;
4489 } else {
4490 /*
4491 * Active hot spares can only be replaced by inactive hot
4492 * spares.
4493 */
4494 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857 4495 oldvd->vdev_isspare &&
34dc7c2f
BB
4496 !spa_has_spare(spa, newvd->vdev_guid))
4497 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4498
4499 /*
4500 * If the source is a hot spare, and the parent isn't already a
4501 * spare, then we want to create a new hot spare. Otherwise, we
4502 * want to create a replacing vdev. The user is not allowed to
4503 * attach to a spared vdev child unless the 'isspare' state is
4504 * the same (spare replaces spare, non-spare replaces
4505 * non-spare).
4506 */
572e2857
BB
4507 if (pvd->vdev_ops == &vdev_replacing_ops &&
4508 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
34dc7c2f 4509 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
572e2857
BB
4510 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4511 newvd->vdev_isspare != oldvd->vdev_isspare) {
34dc7c2f 4512 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
572e2857
BB
4513 }
4514
4515 if (newvd->vdev_isspare)
34dc7c2f
BB
4516 pvops = &vdev_spare_ops;
4517 else
4518 pvops = &vdev_replacing_ops;
4519 }
4520
4521 /*
9babb374 4522 * Make sure the new device is big enough.
34dc7c2f 4523 */
9babb374 4524 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
34dc7c2f
BB
4525 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4526
4527 /*
4528 * The new device cannot have a higher alignment requirement
4529 * than the top-level vdev.
4530 */
4531 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4532 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4533
4534 /*
4535 * If this is an in-place replacement, update oldvd's path and devid
4536 * to make it distinguishable from newvd, and unopenable from now on.
4537 */
4538 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4539 spa_strfree(oldvd->vdev_path);
4540 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
b8d06fca 4541 KM_PUSHPAGE);
34dc7c2f
BB
4542 (void) sprintf(oldvd->vdev_path, "%s/%s",
4543 newvd->vdev_path, "old");
4544 if (oldvd->vdev_devid != NULL) {
4545 spa_strfree(oldvd->vdev_devid);
4546 oldvd->vdev_devid = NULL;
4547 }
4548 }
4549
572e2857 4550 /* mark the device being resilvered */
5d1f7fb6 4551 newvd->vdev_resilver_txg = txg;
572e2857 4552
34dc7c2f
BB
4553 /*
4554 * If the parent is not a mirror, or if we're replacing, insert the new
4555 * mirror/replacing/spare vdev above oldvd.
4556 */
4557 if (pvd->vdev_ops != pvops)
4558 pvd = vdev_add_parent(oldvd, pvops);
4559
4560 ASSERT(pvd->vdev_top->vdev_parent == rvd);
4561 ASSERT(pvd->vdev_ops == pvops);
4562 ASSERT(oldvd->vdev_parent == pvd);
4563
4564 /*
4565 * Extract the new device from its root and add it to pvd.
4566 */
4567 vdev_remove_child(newrootvd, newvd);
4568 newvd->vdev_id = pvd->vdev_children;
428870ff 4569 newvd->vdev_crtxg = oldvd->vdev_crtxg;
34dc7c2f
BB
4570 vdev_add_child(pvd, newvd);
4571
34dc7c2f
BB
4572 tvd = newvd->vdev_top;
4573 ASSERT(pvd->vdev_top == tvd);
4574 ASSERT(tvd->vdev_parent == rvd);
4575
4576 vdev_config_dirty(tvd);
4577
4578 /*
428870ff
BB
4579 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4580 * for any dmu_sync-ed blocks. It will propagate upward when
4581 * spa_vdev_exit() calls vdev_dtl_reassess().
34dc7c2f 4582 */
428870ff 4583 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
34dc7c2f 4584
428870ff
BB
4585 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4586 dtl_max_txg - TXG_INITIAL);
34dc7c2f 4587
9babb374 4588 if (newvd->vdev_isspare) {
34dc7c2f 4589 spa_spare_activate(newvd);
26685276 4590 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_DEVICE_SPARE);
9babb374
BB
4591 }
4592
b128c09f
BB
4593 oldvdpath = spa_strdup(oldvd->vdev_path);
4594 newvdpath = spa_strdup(newvd->vdev_path);
4595 newvd_isspare = newvd->vdev_isspare;
34dc7c2f
BB
4596
4597 /*
4598 * Mark newvd's DTL dirty in this txg.
4599 */
4600 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4601
428870ff 4602 /*
93cf2076
GW
4603 * Schedule the resilver to restart in the future. We do this to
4604 * ensure that dmu_sync-ed blocks have been stitched into the
4605 * respective datasets.
428870ff
BB
4606 */
4607 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4608
4609 /*
4610 * Commit the config
4611 */
4612 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
34dc7c2f 4613
6f1ffb06 4614 spa_history_log_internal(spa, "vdev attach", NULL,
428870ff 4615 "%s vdev=%s %s vdev=%s",
45d1cae3
BB
4616 replacing && newvd_isspare ? "spare in" :
4617 replacing ? "replace" : "attach", newvdpath,
4618 replacing ? "for" : "to", oldvdpath);
b128c09f
BB
4619
4620 spa_strfree(oldvdpath);
4621 spa_strfree(newvdpath);
4622
572e2857 4623 if (spa->spa_bootfs)
26685276 4624 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_BOOTFS_VDEV_ATTACH);
572e2857 4625
34dc7c2f
BB
4626 return (0);
4627}
4628
4629/*
4630 * Detach a device from a mirror or replacing vdev.
d3cc8b15 4631 *
34dc7c2f
BB
4632 * If 'replace_done' is specified, only detach if the parent
4633 * is a replacing vdev.
4634 */
4635int
fb5f0bc8 4636spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
34dc7c2f
BB
4637{
4638 uint64_t txg;
fb5f0bc8 4639 int error;
34dc7c2f
BB
4640 vdev_t *vd, *pvd, *cvd, *tvd;
4641 boolean_t unspare = B_FALSE;
d4ed6673 4642 uint64_t unspare_guid = 0;
428870ff 4643 char *vdpath;
d6320ddb 4644 int c, t;
2e528b49 4645 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
572e2857
BB
4646 ASSERT(spa_writeable(spa));
4647
34dc7c2f
BB
4648 txg = spa_vdev_enter(spa);
4649
b128c09f 4650 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
4651
4652 if (vd == NULL)
4653 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4654
4655 if (!vd->vdev_ops->vdev_op_leaf)
4656 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4657
4658 pvd = vd->vdev_parent;
4659
fb5f0bc8
BB
4660 /*
4661 * If the parent/child relationship is not as expected, don't do it.
4662 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4663 * vdev that's replacing B with C. The user's intent in replacing
4664 * is to go from M(A,B) to M(A,C). If the user decides to cancel
4665 * the replace by detaching C, the expected behavior is to end up
4666 * M(A,B). But suppose that right after deciding to detach C,
4667 * the replacement of B completes. We would have M(A,C), and then
4668 * ask to detach C, which would leave us with just A -- not what
4669 * the user wanted. To prevent this, we make sure that the
4670 * parent/child relationship hasn't changed -- in this example,
4671 * that C's parent is still the replacing vdev R.
4672 */
4673 if (pvd->vdev_guid != pguid && pguid != 0)
4674 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4675
34dc7c2f 4676 /*
572e2857 4677 * Only 'replacing' or 'spare' vdevs can be replaced.
34dc7c2f 4678 */
572e2857
BB
4679 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4680 pvd->vdev_ops != &vdev_spare_ops)
4681 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
34dc7c2f
BB
4682
4683 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4684 spa_version(spa) >= SPA_VERSION_SPARES);
4685
4686 /*
4687 * Only mirror, replacing, and spare vdevs support detach.
4688 */
4689 if (pvd->vdev_ops != &vdev_replacing_ops &&
4690 pvd->vdev_ops != &vdev_mirror_ops &&
4691 pvd->vdev_ops != &vdev_spare_ops)
4692 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4693
4694 /*
fb5f0bc8
BB
4695 * If this device has the only valid copy of some data,
4696 * we cannot safely detach it.
34dc7c2f 4697 */
fb5f0bc8 4698 if (vdev_dtl_required(vd))
34dc7c2f
BB
4699 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4700
fb5f0bc8 4701 ASSERT(pvd->vdev_children >= 2);
34dc7c2f 4702
b128c09f
BB
4703 /*
4704 * If we are detaching the second disk from a replacing vdev, then
4705 * check to see if we changed the original vdev's path to have "/old"
4706 * at the end in spa_vdev_attach(). If so, undo that change now.
4707 */
572e2857
BB
4708 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4709 vd->vdev_path != NULL) {
4710 size_t len = strlen(vd->vdev_path);
4711
d6320ddb 4712 for (c = 0; c < pvd->vdev_children; c++) {
572e2857
BB
4713 cvd = pvd->vdev_child[c];
4714
4715 if (cvd == vd || cvd->vdev_path == NULL)
4716 continue;
4717
4718 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4719 strcmp(cvd->vdev_path + len, "/old") == 0) {
4720 spa_strfree(cvd->vdev_path);
4721 cvd->vdev_path = spa_strdup(vd->vdev_path);
4722 break;
4723 }
b128c09f
BB
4724 }
4725 }
4726
34dc7c2f
BB
4727 /*
4728 * If we are detaching the original disk from a spare, then it implies
4729 * that the spare should become a real disk, and be removed from the
4730 * active spare list for the pool.
4731 */
4732 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857
BB
4733 vd->vdev_id == 0 &&
4734 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
34dc7c2f
BB
4735 unspare = B_TRUE;
4736
4737 /*
4738 * Erase the disk labels so the disk can be used for other things.
4739 * This must be done after all other error cases are handled,
4740 * but before we disembowel vd (so we can still do I/O to it).
4741 * But if we can't do it, don't treat the error as fatal --
4742 * it may be that the unwritability of the disk is the reason
4743 * it's being detached!
4744 */
4745 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4746
4747 /*
4748 * Remove vd from its parent and compact the parent's children.
4749 */
4750 vdev_remove_child(pvd, vd);
4751 vdev_compact_children(pvd);
4752
4753 /*
4754 * Remember one of the remaining children so we can get tvd below.
4755 */
572e2857 4756 cvd = pvd->vdev_child[pvd->vdev_children - 1];
34dc7c2f
BB
4757
4758 /*
4759 * If we need to remove the remaining child from the list of hot spares,
fb5f0bc8
BB
4760 * do it now, marking the vdev as no longer a spare in the process.
4761 * We must do this before vdev_remove_parent(), because that can
4762 * change the GUID if it creates a new toplevel GUID. For a similar
4763 * reason, we must remove the spare now, in the same txg as the detach;
4764 * otherwise someone could attach a new sibling, change the GUID, and
4765 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
34dc7c2f
BB
4766 */
4767 if (unspare) {
4768 ASSERT(cvd->vdev_isspare);
4769 spa_spare_remove(cvd);
4770 unspare_guid = cvd->vdev_guid;
fb5f0bc8 4771 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
572e2857 4772 cvd->vdev_unspare = B_TRUE;
34dc7c2f
BB
4773 }
4774
428870ff
BB
4775 /*
4776 * If the parent mirror/replacing vdev only has one child,
4777 * the parent is no longer needed. Remove it from the tree.
4778 */
572e2857
BB
4779 if (pvd->vdev_children == 1) {
4780 if (pvd->vdev_ops == &vdev_spare_ops)
4781 cvd->vdev_unspare = B_FALSE;
428870ff 4782 vdev_remove_parent(cvd);
572e2857
BB
4783 }
4784
428870ff
BB
4785
4786 /*
4787 * We don't set tvd until now because the parent we just removed
4788 * may have been the previous top-level vdev.
4789 */
4790 tvd = cvd->vdev_top;
4791 ASSERT(tvd->vdev_parent == rvd);
4792
4793 /*
4794 * Reevaluate the parent vdev state.
4795 */
4796 vdev_propagate_state(cvd);
4797
4798 /*
4799 * If the 'autoexpand' property is set on the pool then automatically
4800 * try to expand the size of the pool. For example if the device we
4801 * just detached was smaller than the others, it may be possible to
4802 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4803 * first so that we can obtain the updated sizes of the leaf vdevs.
4804 */
4805 if (spa->spa_autoexpand) {
4806 vdev_reopen(tvd);
4807 vdev_expand(tvd, txg);
4808 }
4809
4810 vdev_config_dirty(tvd);
4811
4812 /*
4813 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
4814 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4815 * But first make sure we're not on any *other* txg's DTL list, to
4816 * prevent vd from being accessed after it's freed.
4817 */
4818 vdpath = spa_strdup(vd->vdev_path);
d6320ddb 4819 for (t = 0; t < TXG_SIZE; t++)
428870ff
BB
4820 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4821 vd->vdev_detached = B_TRUE;
4822 vdev_dirty(tvd, VDD_DTL, vd, txg);
4823
26685276 4824 spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_REMOVE);
428870ff 4825
572e2857
BB
4826 /* hang on to the spa before we release the lock */
4827 spa_open_ref(spa, FTAG);
4828
428870ff
BB
4829 error = spa_vdev_exit(spa, vd, txg, 0);
4830
6f1ffb06 4831 spa_history_log_internal(spa, "detach", NULL,
428870ff
BB
4832 "vdev=%s", vdpath);
4833 spa_strfree(vdpath);
4834
4835 /*
4836 * If this was the removal of the original device in a hot spare vdev,
4837 * then we want to go through and remove the device from the hot spare
4838 * list of every other pool.
4839 */
4840 if (unspare) {
572e2857
BB
4841 spa_t *altspa = NULL;
4842
428870ff 4843 mutex_enter(&spa_namespace_lock);
572e2857
BB
4844 while ((altspa = spa_next(altspa)) != NULL) {
4845 if (altspa->spa_state != POOL_STATE_ACTIVE ||
4846 altspa == spa)
428870ff 4847 continue;
572e2857
BB
4848
4849 spa_open_ref(altspa, FTAG);
428870ff 4850 mutex_exit(&spa_namespace_lock);
572e2857 4851 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
428870ff 4852 mutex_enter(&spa_namespace_lock);
572e2857 4853 spa_close(altspa, FTAG);
428870ff
BB
4854 }
4855 mutex_exit(&spa_namespace_lock);
572e2857
BB
4856
4857 /* search the rest of the vdevs for spares to remove */
4858 spa_vdev_resilver_done(spa);
428870ff
BB
4859 }
4860
572e2857
BB
4861 /* all done with the spa; OK to release */
4862 mutex_enter(&spa_namespace_lock);
4863 spa_close(spa, FTAG);
4864 mutex_exit(&spa_namespace_lock);
4865
428870ff
BB
4866 return (error);
4867}
4868
4869/*
4870 * Split a set of devices from their mirrors, and create a new pool from them.
4871 */
4872int
4873spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4874 nvlist_t *props, boolean_t exp)
4875{
4876 int error = 0;
4877 uint64_t txg, *glist;
4878 spa_t *newspa;
4879 uint_t c, children, lastlog;
4880 nvlist_t **child, *nvl, *tmp;
4881 dmu_tx_t *tx;
4882 char *altroot = NULL;
4883 vdev_t *rvd, **vml = NULL; /* vdev modify list */
4884 boolean_t activate_slog;
4885
572e2857 4886 ASSERT(spa_writeable(spa));
428870ff
BB
4887
4888 txg = spa_vdev_enter(spa);
4889
4890 /* clear the log and flush everything up to now */
4891 activate_slog = spa_passivate_log(spa);
4892 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4893 error = spa_offline_log(spa);
4894 txg = spa_vdev_config_enter(spa);
4895
4896 if (activate_slog)
4897 spa_activate_log(spa);
4898
4899 if (error != 0)
4900 return (spa_vdev_exit(spa, NULL, txg, error));
4901
4902 /* check new spa name before going any further */
4903 if (spa_lookup(newname) != NULL)
4904 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4905
4906 /*
4907 * scan through all the children to ensure they're all mirrors
4908 */
4909 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4910 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4911 &children) != 0)
4912 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4913
4914 /* first, check to ensure we've got the right child count */
4915 rvd = spa->spa_root_vdev;
4916 lastlog = 0;
4917 for (c = 0; c < rvd->vdev_children; c++) {
4918 vdev_t *vd = rvd->vdev_child[c];
4919
4920 /* don't count the holes & logs as children */
4921 if (vd->vdev_islog || vd->vdev_ishole) {
4922 if (lastlog == 0)
4923 lastlog = c;
4924 continue;
4925 }
4926
4927 lastlog = 0;
4928 }
4929 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4930 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4931
4932 /* next, ensure no spare or cache devices are part of the split */
4933 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4934 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4935 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4936
b8d06fca
RY
4937 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_PUSHPAGE);
4938 glist = kmem_zalloc(children * sizeof (uint64_t), KM_PUSHPAGE);
428870ff
BB
4939
4940 /* then, loop over each vdev and validate it */
4941 for (c = 0; c < children; c++) {
4942 uint64_t is_hole = 0;
4943
4944 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4945 &is_hole);
4946
4947 if (is_hole != 0) {
4948 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4949 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4950 continue;
4951 } else {
2e528b49 4952 error = SET_ERROR(EINVAL);
428870ff
BB
4953 break;
4954 }
4955 }
4956
4957 /* which disk is going to be split? */
4958 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4959 &glist[c]) != 0) {
2e528b49 4960 error = SET_ERROR(EINVAL);
428870ff
BB
4961 break;
4962 }
4963
4964 /* look it up in the spa */
4965 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4966 if (vml[c] == NULL) {
2e528b49 4967 error = SET_ERROR(ENODEV);
428870ff
BB
4968 break;
4969 }
4970
4971 /* make sure there's nothing stopping the split */
4972 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4973 vml[c]->vdev_islog ||
4974 vml[c]->vdev_ishole ||
4975 vml[c]->vdev_isspare ||
4976 vml[c]->vdev_isl2cache ||
4977 !vdev_writeable(vml[c]) ||
4978 vml[c]->vdev_children != 0 ||
4979 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4980 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
2e528b49 4981 error = SET_ERROR(EINVAL);
428870ff
BB
4982 break;
4983 }
4984
4985 if (vdev_dtl_required(vml[c])) {
2e528b49 4986 error = SET_ERROR(EBUSY);
428870ff
BB
4987 break;
4988 }
4989
4990 /* we need certain info from the top level */
4991 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4992 vml[c]->vdev_top->vdev_ms_array) == 0);
4993 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4994 vml[c]->vdev_top->vdev_ms_shift) == 0);
4995 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4996 vml[c]->vdev_top->vdev_asize) == 0);
4997 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4998 vml[c]->vdev_top->vdev_ashift) == 0);
4999 }
5000
5001 if (error != 0) {
5002 kmem_free(vml, children * sizeof (vdev_t *));
5003 kmem_free(glist, children * sizeof (uint64_t));
5004 return (spa_vdev_exit(spa, NULL, txg, error));
5005 }
5006
5007 /* stop writers from using the disks */
5008 for (c = 0; c < children; c++) {
5009 if (vml[c] != NULL)
5010 vml[c]->vdev_offline = B_TRUE;
5011 }
5012 vdev_reopen(spa->spa_root_vdev);
34dc7c2f
BB
5013
5014 /*
428870ff
BB
5015 * Temporarily record the splitting vdevs in the spa config. This
5016 * will disappear once the config is regenerated.
34dc7c2f 5017 */
b8d06fca 5018 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
428870ff
BB
5019 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5020 glist, children) == 0);
5021 kmem_free(glist, children * sizeof (uint64_t));
34dc7c2f 5022
428870ff
BB
5023 mutex_enter(&spa->spa_props_lock);
5024 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5025 nvl) == 0);
5026 mutex_exit(&spa->spa_props_lock);
5027 spa->spa_config_splitting = nvl;
5028 vdev_config_dirty(spa->spa_root_vdev);
5029
5030 /* configure and create the new pool */
5031 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5032 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5033 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5034 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5035 spa_version(spa)) == 0);
5036 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5037 spa->spa_config_txg) == 0);
5038 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5039 spa_generate_guid(NULL)) == 0);
5040 (void) nvlist_lookup_string(props,
5041 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
34dc7c2f 5042
428870ff
BB
5043 /* add the new pool to the namespace */
5044 newspa = spa_add(newname, config, altroot);
5045 newspa->spa_config_txg = spa->spa_config_txg;
5046 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5047
5048 /* release the spa config lock, retaining the namespace lock */
5049 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5050
5051 if (zio_injection_enabled)
5052 zio_handle_panic_injection(spa, FTAG, 1);
5053
5054 spa_activate(newspa, spa_mode_global);
5055 spa_async_suspend(newspa);
5056
5057 /* create the new pool from the disks of the original pool */
5058 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5059 if (error)
5060 goto out;
5061
5062 /* if that worked, generate a real config for the new pool */
5063 if (newspa->spa_root_vdev != NULL) {
5064 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
b8d06fca 5065 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
428870ff
BB
5066 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5067 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5068 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5069 B_TRUE));
9babb374 5070 }
34dc7c2f 5071
428870ff
BB
5072 /* set the props */
5073 if (props != NULL) {
5074 spa_configfile_set(newspa, props, B_FALSE);
5075 error = spa_prop_set(newspa, props);
5076 if (error)
5077 goto out;
5078 }
34dc7c2f 5079
428870ff
BB
5080 /* flush everything */
5081 txg = spa_vdev_config_enter(newspa);
5082 vdev_config_dirty(newspa->spa_root_vdev);
5083 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
34dc7c2f 5084
428870ff
BB
5085 if (zio_injection_enabled)
5086 zio_handle_panic_injection(spa, FTAG, 2);
34dc7c2f 5087
428870ff 5088 spa_async_resume(newspa);
34dc7c2f 5089
428870ff
BB
5090 /* finally, update the original pool's config */
5091 txg = spa_vdev_config_enter(spa);
5092 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5093 error = dmu_tx_assign(tx, TXG_WAIT);
5094 if (error != 0)
5095 dmu_tx_abort(tx);
5096 for (c = 0; c < children; c++) {
5097 if (vml[c] != NULL) {
5098 vdev_split(vml[c]);
5099 if (error == 0)
6f1ffb06
MA
5100 spa_history_log_internal(spa, "detach", tx,
5101 "vdev=%s", vml[c]->vdev_path);
428870ff 5102 vdev_free(vml[c]);
34dc7c2f 5103 }
34dc7c2f 5104 }
428870ff
BB
5105 vdev_config_dirty(spa->spa_root_vdev);
5106 spa->spa_config_splitting = NULL;
5107 nvlist_free(nvl);
5108 if (error == 0)
5109 dmu_tx_commit(tx);
5110 (void) spa_vdev_exit(spa, NULL, txg, 0);
5111
5112 if (zio_injection_enabled)
5113 zio_handle_panic_injection(spa, FTAG, 3);
5114
5115 /* split is complete; log a history record */
6f1ffb06
MA
5116 spa_history_log_internal(newspa, "split", NULL,
5117 "from pool %s", spa_name(spa));
428870ff
BB
5118
5119 kmem_free(vml, children * sizeof (vdev_t *));
5120
5121 /* if we're not going to mount the filesystems in userland, export */
5122 if (exp)
5123 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5124 B_FALSE, B_FALSE);
5125
5126 return (error);
5127
5128out:
5129 spa_unload(newspa);
5130 spa_deactivate(newspa);
5131 spa_remove(newspa);
5132
5133 txg = spa_vdev_config_enter(spa);
5134
5135 /* re-online all offlined disks */
5136 for (c = 0; c < children; c++) {
5137 if (vml[c] != NULL)
5138 vml[c]->vdev_offline = B_FALSE;
5139 }
5140 vdev_reopen(spa->spa_root_vdev);
5141
5142 nvlist_free(spa->spa_config_splitting);
5143 spa->spa_config_splitting = NULL;
5144 (void) spa_vdev_exit(spa, NULL, txg, error);
34dc7c2f 5145
428870ff 5146 kmem_free(vml, children * sizeof (vdev_t *));
34dc7c2f
BB
5147 return (error);
5148}
5149
b128c09f
BB
5150static nvlist_t *
5151spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
34dc7c2f 5152{
d6320ddb
BB
5153 int i;
5154
5155 for (i = 0; i < count; i++) {
b128c09f 5156 uint64_t guid;
34dc7c2f 5157
b128c09f
BB
5158 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5159 &guid) == 0);
34dc7c2f 5160
b128c09f
BB
5161 if (guid == target_guid)
5162 return (nvpp[i]);
34dc7c2f
BB
5163 }
5164
b128c09f 5165 return (NULL);
34dc7c2f
BB
5166}
5167
b128c09f
BB
5168static void
5169spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5170 nvlist_t *dev_to_remove)
34dc7c2f 5171{
b128c09f 5172 nvlist_t **newdev = NULL;
d6320ddb 5173 int i, j;
34dc7c2f 5174
b128c09f 5175 if (count > 1)
b8d06fca 5176 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_PUSHPAGE);
34dc7c2f 5177
d6320ddb 5178 for (i = 0, j = 0; i < count; i++) {
b128c09f
BB
5179 if (dev[i] == dev_to_remove)
5180 continue;
b8d06fca 5181 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_PUSHPAGE) == 0);
34dc7c2f
BB
5182 }
5183
b128c09f
BB
5184 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5185 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
34dc7c2f 5186
d6320ddb 5187 for (i = 0; i < count - 1; i++)
b128c09f 5188 nvlist_free(newdev[i]);
34dc7c2f 5189
b128c09f
BB
5190 if (count > 1)
5191 kmem_free(newdev, (count - 1) * sizeof (void *));
34dc7c2f
BB
5192}
5193
428870ff
BB
5194/*
5195 * Evacuate the device.
5196 */
5197static int
5198spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5199{
5200 uint64_t txg;
5201 int error = 0;
5202
5203 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5204 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5205 ASSERT(vd == vd->vdev_top);
5206
5207 /*
5208 * Evacuate the device. We don't hold the config lock as writer
5209 * since we need to do I/O but we do keep the
5210 * spa_namespace_lock held. Once this completes the device
5211 * should no longer have any blocks allocated on it.
5212 */
5213 if (vd->vdev_islog) {
5214 if (vd->vdev_stat.vs_alloc != 0)
5215 error = spa_offline_log(spa);
5216 } else {
2e528b49 5217 error = SET_ERROR(ENOTSUP);
428870ff
BB
5218 }
5219
5220 if (error)
5221 return (error);
5222
5223 /*
5224 * The evacuation succeeded. Remove any remaining MOS metadata
5225 * associated with this vdev, and wait for these changes to sync.
5226 */
c99c9001 5227 ASSERT0(vd->vdev_stat.vs_alloc);
428870ff
BB
5228 txg = spa_vdev_config_enter(spa);
5229 vd->vdev_removing = B_TRUE;
93cf2076 5230 vdev_dirty_leaves(vd, VDD_DTL, txg);
428870ff
BB
5231 vdev_config_dirty(vd);
5232 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5233
5234 return (0);
5235}
5236
5237/*
5238 * Complete the removal by cleaning up the namespace.
5239 */
5240static void
5241spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5242{
5243 vdev_t *rvd = spa->spa_root_vdev;
5244 uint64_t id = vd->vdev_id;
5245 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5246
5247 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5248 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5249 ASSERT(vd == vd->vdev_top);
5250
5251 /*
5252 * Only remove any devices which are empty.
5253 */
5254 if (vd->vdev_stat.vs_alloc != 0)
5255 return;
5256
5257 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5258
5259 if (list_link_active(&vd->vdev_state_dirty_node))
5260 vdev_state_clean(vd);
5261 if (list_link_active(&vd->vdev_config_dirty_node))
5262 vdev_config_clean(vd);
5263
5264 vdev_free(vd);
5265
5266 if (last_vdev) {
5267 vdev_compact_children(rvd);
5268 } else {
5269 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5270 vdev_add_child(rvd, vd);
5271 }
5272 vdev_config_dirty(rvd);
5273
5274 /*
5275 * Reassess the health of our root vdev.
5276 */
5277 vdev_reopen(rvd);
5278}
5279
5280/*
5281 * Remove a device from the pool -
5282 *
5283 * Removing a device from the vdev namespace requires several steps
5284 * and can take a significant amount of time. As a result we use
5285 * the spa_vdev_config_[enter/exit] functions which allow us to
5286 * grab and release the spa_config_lock while still holding the namespace
5287 * lock. During each step the configuration is synced out.
d3cc8b15
WA
5288 *
5289 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5290 * devices.
34dc7c2f
BB
5291 */
5292int
5293spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5294{
5295 vdev_t *vd;
428870ff 5296 metaslab_group_t *mg;
b128c09f 5297 nvlist_t **spares, **l2cache, *nv;
fb5f0bc8 5298 uint64_t txg = 0;
428870ff 5299 uint_t nspares, nl2cache;
34dc7c2f 5300 int error = 0;
fb5f0bc8 5301 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
34dc7c2f 5302
572e2857
BB
5303 ASSERT(spa_writeable(spa));
5304
fb5f0bc8
BB
5305 if (!locked)
5306 txg = spa_vdev_enter(spa);
34dc7c2f 5307
b128c09f 5308 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
5309
5310 if (spa->spa_spares.sav_vdevs != NULL &&
34dc7c2f 5311 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
b128c09f
BB
5312 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5313 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5314 /*
5315 * Only remove the hot spare if it's not currently in use
5316 * in this pool.
5317 */
5318 if (vd == NULL || unspare) {
5319 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5320 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5321 spa_load_spares(spa);
5322 spa->spa_spares.sav_sync = B_TRUE;
5323 } else {
2e528b49 5324 error = SET_ERROR(EBUSY);
b128c09f
BB
5325 }
5326 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
34dc7c2f 5327 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
b128c09f
BB
5328 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5329 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5330 /*
5331 * Cache devices can always be removed.
5332 */
5333 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5334 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
34dc7c2f
BB
5335 spa_load_l2cache(spa);
5336 spa->spa_l2cache.sav_sync = B_TRUE;
428870ff
BB
5337 } else if (vd != NULL && vd->vdev_islog) {
5338 ASSERT(!locked);
5339 ASSERT(vd == vd->vdev_top);
5340
428870ff
BB
5341 mg = vd->vdev_mg;
5342
5343 /*
5344 * Stop allocating from this vdev.
5345 */
5346 metaslab_group_passivate(mg);
5347
5348 /*
5349 * Wait for the youngest allocations and frees to sync,
5350 * and then wait for the deferral of those frees to finish.
5351 */
5352 spa_vdev_config_exit(spa, NULL,
5353 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5354
5355 /*
5356 * Attempt to evacuate the vdev.
5357 */
5358 error = spa_vdev_remove_evacuate(spa, vd);
5359
5360 txg = spa_vdev_config_enter(spa);
5361
5362 /*
5363 * If we couldn't evacuate the vdev, unwind.
5364 */
5365 if (error) {
5366 metaslab_group_activate(mg);
5367 return (spa_vdev_exit(spa, NULL, txg, error));
5368 }
5369
5370 /*
5371 * Clean up the vdev namespace.
5372 */
5373 spa_vdev_remove_from_namespace(spa, vd);
5374
b128c09f
BB
5375 } else if (vd != NULL) {
5376 /*
5377 * Normal vdevs cannot be removed (yet).
5378 */
2e528b49 5379 error = SET_ERROR(ENOTSUP);
b128c09f
BB
5380 } else {
5381 /*
5382 * There is no vdev of any kind with the specified guid.
5383 */
2e528b49 5384 error = SET_ERROR(ENOENT);
34dc7c2f
BB
5385 }
5386
fb5f0bc8
BB
5387 if (!locked)
5388 return (spa_vdev_exit(spa, NULL, txg, error));
5389
5390 return (error);
34dc7c2f
BB
5391}
5392
5393/*
5394 * Find any device that's done replacing, or a vdev marked 'unspare' that's
d3cc8b15 5395 * currently spared, so we can detach it.
34dc7c2f
BB
5396 */
5397static vdev_t *
5398spa_vdev_resilver_done_hunt(vdev_t *vd)
5399{
5400 vdev_t *newvd, *oldvd;
d6320ddb 5401 int c;
34dc7c2f 5402
d6320ddb 5403 for (c = 0; c < vd->vdev_children; c++) {
34dc7c2f
BB
5404 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5405 if (oldvd != NULL)
5406 return (oldvd);
5407 }
5408
5409 /*
572e2857
BB
5410 * Check for a completed replacement. We always consider the first
5411 * vdev in the list to be the oldest vdev, and the last one to be
5412 * the newest (see spa_vdev_attach() for how that works). In
5413 * the case where the newest vdev is faulted, we will not automatically
5414 * remove it after a resilver completes. This is OK as it will require
5415 * user intervention to determine which disk the admin wishes to keep.
34dc7c2f 5416 */
572e2857
BB
5417 if (vd->vdev_ops == &vdev_replacing_ops) {
5418 ASSERT(vd->vdev_children > 1);
5419
5420 newvd = vd->vdev_child[vd->vdev_children - 1];
34dc7c2f 5421 oldvd = vd->vdev_child[0];
34dc7c2f 5422
fb5f0bc8 5423 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5424 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
fb5f0bc8 5425 !vdev_dtl_required(oldvd))
34dc7c2f 5426 return (oldvd);
34dc7c2f
BB
5427 }
5428
5429 /*
5430 * Check for a completed resilver with the 'unspare' flag set.
5431 */
572e2857
BB
5432 if (vd->vdev_ops == &vdev_spare_ops) {
5433 vdev_t *first = vd->vdev_child[0];
5434 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5435
5436 if (last->vdev_unspare) {
5437 oldvd = first;
5438 newvd = last;
5439 } else if (first->vdev_unspare) {
5440 oldvd = last;
5441 newvd = first;
5442 } else {
5443 oldvd = NULL;
5444 }
34dc7c2f 5445
572e2857 5446 if (oldvd != NULL &&
fb5f0bc8 5447 vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5448 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
572e2857 5449 !vdev_dtl_required(oldvd))
34dc7c2f 5450 return (oldvd);
572e2857
BB
5451
5452 /*
5453 * If there are more than two spares attached to a disk,
5454 * and those spares are not required, then we want to
5455 * attempt to free them up now so that they can be used
5456 * by other pools. Once we're back down to a single
5457 * disk+spare, we stop removing them.
5458 */
5459 if (vd->vdev_children > 2) {
5460 newvd = vd->vdev_child[1];
5461
5462 if (newvd->vdev_isspare && last->vdev_isspare &&
5463 vdev_dtl_empty(last, DTL_MISSING) &&
5464 vdev_dtl_empty(last, DTL_OUTAGE) &&
5465 !vdev_dtl_required(newvd))
5466 return (newvd);
34dc7c2f 5467 }
34dc7c2f
BB
5468 }
5469
5470 return (NULL);
5471}
5472
5473static void
5474spa_vdev_resilver_done(spa_t *spa)
5475{
fb5f0bc8
BB
5476 vdev_t *vd, *pvd, *ppvd;
5477 uint64_t guid, sguid, pguid, ppguid;
34dc7c2f 5478
fb5f0bc8 5479 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
5480
5481 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
fb5f0bc8
BB
5482 pvd = vd->vdev_parent;
5483 ppvd = pvd->vdev_parent;
34dc7c2f 5484 guid = vd->vdev_guid;
fb5f0bc8
BB
5485 pguid = pvd->vdev_guid;
5486 ppguid = ppvd->vdev_guid;
5487 sguid = 0;
34dc7c2f
BB
5488 /*
5489 * If we have just finished replacing a hot spared device, then
5490 * we need to detach the parent's first child (the original hot
5491 * spare) as well.
5492 */
572e2857
BB
5493 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5494 ppvd->vdev_children == 2) {
34dc7c2f 5495 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
fb5f0bc8 5496 sguid = ppvd->vdev_child[1]->vdev_guid;
34dc7c2f 5497 }
5d1f7fb6
GW
5498 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5499
fb5f0bc8
BB
5500 spa_config_exit(spa, SCL_ALL, FTAG);
5501 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
34dc7c2f 5502 return;
fb5f0bc8 5503 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
34dc7c2f 5504 return;
fb5f0bc8 5505 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
5506 }
5507
fb5f0bc8 5508 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
5509}
5510
5511/*
428870ff 5512 * Update the stored path or FRU for this vdev.
34dc7c2f
BB
5513 */
5514int
9babb374
BB
5515spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5516 boolean_t ispath)
34dc7c2f 5517{
b128c09f 5518 vdev_t *vd;
428870ff 5519 boolean_t sync = B_FALSE;
34dc7c2f 5520
572e2857
BB
5521 ASSERT(spa_writeable(spa));
5522
428870ff 5523 spa_vdev_state_enter(spa, SCL_ALL);
34dc7c2f 5524
9babb374 5525 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
428870ff 5526 return (spa_vdev_state_exit(spa, NULL, ENOENT));
34dc7c2f
BB
5527
5528 if (!vd->vdev_ops->vdev_op_leaf)
428870ff 5529 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
34dc7c2f 5530
9babb374 5531 if (ispath) {
428870ff
BB
5532 if (strcmp(value, vd->vdev_path) != 0) {
5533 spa_strfree(vd->vdev_path);
5534 vd->vdev_path = spa_strdup(value);
5535 sync = B_TRUE;
5536 }
9babb374 5537 } else {
428870ff
BB
5538 if (vd->vdev_fru == NULL) {
5539 vd->vdev_fru = spa_strdup(value);
5540 sync = B_TRUE;
5541 } else if (strcmp(value, vd->vdev_fru) != 0) {
9babb374 5542 spa_strfree(vd->vdev_fru);
428870ff
BB
5543 vd->vdev_fru = spa_strdup(value);
5544 sync = B_TRUE;
5545 }
9babb374 5546 }
34dc7c2f 5547
428870ff 5548 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
34dc7c2f
BB
5549}
5550
9babb374
BB
5551int
5552spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5553{
5554 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5555}
5556
5557int
5558spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5559{
5560 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5561}
5562
34dc7c2f
BB
5563/*
5564 * ==========================================================================
428870ff 5565 * SPA Scanning
34dc7c2f
BB
5566 * ==========================================================================
5567 */
5568
34dc7c2f 5569int
428870ff
BB
5570spa_scan_stop(spa_t *spa)
5571{
5572 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5573 if (dsl_scan_resilvering(spa->spa_dsl_pool))
2e528b49 5574 return (SET_ERROR(EBUSY));
428870ff
BB
5575 return (dsl_scan_cancel(spa->spa_dsl_pool));
5576}
5577
5578int
5579spa_scan(spa_t *spa, pool_scan_func_t func)
34dc7c2f 5580{
b128c09f 5581 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
34dc7c2f 5582
428870ff 5583 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
2e528b49 5584 return (SET_ERROR(ENOTSUP));
34dc7c2f 5585
34dc7c2f 5586 /*
b128c09f
BB
5587 * If a resilver was requested, but there is no DTL on a
5588 * writeable leaf device, we have nothing to do.
34dc7c2f 5589 */
428870ff 5590 if (func == POOL_SCAN_RESILVER &&
b128c09f
BB
5591 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5592 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
34dc7c2f
BB
5593 return (0);
5594 }
5595
428870ff 5596 return (dsl_scan(spa->spa_dsl_pool, func));
34dc7c2f
BB
5597}
5598
5599/*
5600 * ==========================================================================
5601 * SPA async task processing
5602 * ==========================================================================
5603 */
5604
5605static void
5606spa_async_remove(spa_t *spa, vdev_t *vd)
5607{
d6320ddb
BB
5608 int c;
5609
b128c09f 5610 if (vd->vdev_remove_wanted) {
428870ff
BB
5611 vd->vdev_remove_wanted = B_FALSE;
5612 vd->vdev_delayed_close = B_FALSE;
b128c09f 5613 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
428870ff
BB
5614
5615 /*
5616 * We want to clear the stats, but we don't want to do a full
5617 * vdev_clear() as that will cause us to throw away
5618 * degraded/faulted state as well as attempt to reopen the
5619 * device, all of which is a waste.
5620 */
5621 vd->vdev_stat.vs_read_errors = 0;
5622 vd->vdev_stat.vs_write_errors = 0;
5623 vd->vdev_stat.vs_checksum_errors = 0;
5624
b128c09f
BB
5625 vdev_state_dirty(vd->vdev_top);
5626 }
34dc7c2f 5627
d6320ddb 5628 for (c = 0; c < vd->vdev_children; c++)
b128c09f
BB
5629 spa_async_remove(spa, vd->vdev_child[c]);
5630}
5631
5632static void
5633spa_async_probe(spa_t *spa, vdev_t *vd)
5634{
d6320ddb
BB
5635 int c;
5636
b128c09f 5637 if (vd->vdev_probe_wanted) {
428870ff 5638 vd->vdev_probe_wanted = B_FALSE;
b128c09f 5639 vdev_reopen(vd); /* vdev_open() does the actual probe */
34dc7c2f 5640 }
b128c09f 5641
d6320ddb 5642 for (c = 0; c < vd->vdev_children; c++)
b128c09f 5643 spa_async_probe(spa, vd->vdev_child[c]);
34dc7c2f
BB
5644}
5645
9babb374
BB
5646static void
5647spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5648{
d6320ddb 5649 int c;
9babb374
BB
5650
5651 if (!spa->spa_autoexpand)
5652 return;
5653
d6320ddb 5654 for (c = 0; c < vd->vdev_children; c++) {
9babb374
BB
5655 vdev_t *cvd = vd->vdev_child[c];
5656 spa_async_autoexpand(spa, cvd);
5657 }
5658
5659 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5660 return;
5661
26685276 5662 spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_AUTOEXPAND);
9babb374
BB
5663}
5664
34dc7c2f
BB
5665static void
5666spa_async_thread(spa_t *spa)
5667{
d6320ddb 5668 int tasks, i;
34dc7c2f
BB
5669
5670 ASSERT(spa->spa_sync_on);
5671
5672 mutex_enter(&spa->spa_async_lock);
5673 tasks = spa->spa_async_tasks;
5674 spa->spa_async_tasks = 0;
5675 mutex_exit(&spa->spa_async_lock);
5676
5677 /*
5678 * See if the config needs to be updated.
5679 */
5680 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
428870ff 5681 uint64_t old_space, new_space;
9babb374 5682
34dc7c2f 5683 mutex_enter(&spa_namespace_lock);
428870ff 5684 old_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 5685 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
428870ff 5686 new_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 5687 mutex_exit(&spa_namespace_lock);
9babb374
BB
5688
5689 /*
5690 * If the pool grew as a result of the config update,
5691 * then log an internal history event.
5692 */
428870ff 5693 if (new_space != old_space) {
6f1ffb06 5694 spa_history_log_internal(spa, "vdev online", NULL,
45d1cae3 5695 "pool '%s' size: %llu(+%llu)",
428870ff 5696 spa_name(spa), new_space, new_space - old_space);
9babb374 5697 }
34dc7c2f
BB
5698 }
5699
5700 /*
5701 * See if any devices need to be marked REMOVED.
34dc7c2f 5702 */
b128c09f 5703 if (tasks & SPA_ASYNC_REMOVE) {
428870ff 5704 spa_vdev_state_enter(spa, SCL_NONE);
34dc7c2f 5705 spa_async_remove(spa, spa->spa_root_vdev);
d6320ddb 5706 for (i = 0; i < spa->spa_l2cache.sav_count; i++)
b128c09f 5707 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
d6320ddb 5708 for (i = 0; i < spa->spa_spares.sav_count; i++)
b128c09f
BB
5709 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5710 (void) spa_vdev_state_exit(spa, NULL, 0);
34dc7c2f
BB
5711 }
5712
9babb374
BB
5713 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5714 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5715 spa_async_autoexpand(spa, spa->spa_root_vdev);
5716 spa_config_exit(spa, SCL_CONFIG, FTAG);
5717 }
5718
34dc7c2f 5719 /*
b128c09f 5720 * See if any devices need to be probed.
34dc7c2f 5721 */
b128c09f 5722 if (tasks & SPA_ASYNC_PROBE) {
428870ff 5723 spa_vdev_state_enter(spa, SCL_NONE);
b128c09f
BB
5724 spa_async_probe(spa, spa->spa_root_vdev);
5725 (void) spa_vdev_state_exit(spa, NULL, 0);
5726 }
34dc7c2f
BB
5727
5728 /*
b128c09f 5729 * If any devices are done replacing, detach them.
34dc7c2f 5730 */
b128c09f
BB
5731 if (tasks & SPA_ASYNC_RESILVER_DONE)
5732 spa_vdev_resilver_done(spa);
34dc7c2f
BB
5733
5734 /*
5735 * Kick off a resilver.
5736 */
b128c09f 5737 if (tasks & SPA_ASYNC_RESILVER)
428870ff 5738 dsl_resilver_restart(spa->spa_dsl_pool, 0);
34dc7c2f
BB
5739
5740 /*
5741 * Let the world know that we're done.
5742 */
5743 mutex_enter(&spa->spa_async_lock);
5744 spa->spa_async_thread = NULL;
5745 cv_broadcast(&spa->spa_async_cv);
5746 mutex_exit(&spa->spa_async_lock);
5747 thread_exit();
5748}
5749
5750void
5751spa_async_suspend(spa_t *spa)
5752{
5753 mutex_enter(&spa->spa_async_lock);
5754 spa->spa_async_suspended++;
5755 while (spa->spa_async_thread != NULL)
5756 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5757 mutex_exit(&spa->spa_async_lock);
5758}
5759
5760void
5761spa_async_resume(spa_t *spa)
5762{
5763 mutex_enter(&spa->spa_async_lock);
5764 ASSERT(spa->spa_async_suspended != 0);
5765 spa->spa_async_suspended--;
5766 mutex_exit(&spa->spa_async_lock);
5767}
5768
5769static void
5770spa_async_dispatch(spa_t *spa)
5771{
5772 mutex_enter(&spa->spa_async_lock);
5773 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
5774 spa->spa_async_thread == NULL &&
5775 rootdir != NULL && !vn_is_readonly(rootdir))
5776 spa->spa_async_thread = thread_create(NULL, 0,
5777 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5778 mutex_exit(&spa->spa_async_lock);
5779}
5780
5781void
5782spa_async_request(spa_t *spa, int task)
5783{
428870ff 5784 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
34dc7c2f
BB
5785 mutex_enter(&spa->spa_async_lock);
5786 spa->spa_async_tasks |= task;
5787 mutex_exit(&spa->spa_async_lock);
5788}
5789
5790/*
5791 * ==========================================================================
5792 * SPA syncing routines
5793 * ==========================================================================
5794 */
5795
428870ff
BB
5796static int
5797bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
34dc7c2f 5798{
428870ff
BB
5799 bpobj_t *bpo = arg;
5800 bpobj_enqueue(bpo, bp, tx);
5801 return (0);
5802}
34dc7c2f 5803
428870ff
BB
5804static int
5805spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5806{
5807 zio_t *zio = arg;
34dc7c2f 5808
428870ff
BB
5809 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5810 zio->io_flags));
5811 return (0);
34dc7c2f
BB
5812}
5813
e8b96c60
MA
5814/*
5815 * Note: this simple function is not inlined to make it easier to dtrace the
5816 * amount of time spent syncing frees.
5817 */
5818static void
5819spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
5820{
5821 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5822 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
5823 VERIFY(zio_wait(zio) == 0);
5824}
5825
5826/*
5827 * Note: this simple function is not inlined to make it easier to dtrace the
5828 * amount of time spent syncing deferred frees.
5829 */
5830static void
5831spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
5832{
5833 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5834 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
5835 spa_free_sync_cb, zio, tx), ==, 0);
5836 VERIFY0(zio_wait(zio));
5837}
5838
34dc7c2f
BB
5839static void
5840spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5841{
5842 char *packed = NULL;
b128c09f 5843 size_t bufsize;
34dc7c2f
BB
5844 size_t nvsize = 0;
5845 dmu_buf_t *db;
5846
5847 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5848
b128c09f
BB
5849 /*
5850 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
b0bc7a84 5851 * information. This avoids the dmu_buf_will_dirty() path and
b128c09f
BB
5852 * saves us a pre-read to get data we don't actually care about.
5853 */
9ae529ec 5854 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
b8d06fca 5855 packed = vmem_alloc(bufsize, KM_PUSHPAGE);
34dc7c2f
BB
5856
5857 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
b8d06fca 5858 KM_PUSHPAGE) == 0);
b128c09f 5859 bzero(packed + nvsize, bufsize - nvsize);
34dc7c2f 5860
b128c09f 5861 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
34dc7c2f 5862
00b46022 5863 vmem_free(packed, bufsize);
34dc7c2f
BB
5864
5865 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5866 dmu_buf_will_dirty(db, tx);
5867 *(uint64_t *)db->db_data = nvsize;
5868 dmu_buf_rele(db, FTAG);
5869}
5870
5871static void
5872spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5873 const char *config, const char *entry)
5874{
5875 nvlist_t *nvroot;
5876 nvlist_t **list;
5877 int i;
5878
5879 if (!sav->sav_sync)
5880 return;
5881
5882 /*
5883 * Update the MOS nvlist describing the list of available devices.
5884 * spa_validate_aux() will have already made sure this nvlist is
5885 * valid and the vdevs are labeled appropriately.
5886 */
5887 if (sav->sav_object == 0) {
5888 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5889 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5890 sizeof (uint64_t), tx);
5891 VERIFY(zap_update(spa->spa_meta_objset,
5892 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5893 &sav->sav_object, tx) == 0);
5894 }
5895
b8d06fca 5896 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
5897 if (sav->sav_count == 0) {
5898 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5899 } else {
d1d7e268 5900 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_PUSHPAGE);
34dc7c2f
BB
5901 for (i = 0; i < sav->sav_count; i++)
5902 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
428870ff 5903 B_FALSE, VDEV_CONFIG_L2CACHE);
34dc7c2f
BB
5904 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
5905 sav->sav_count) == 0);
5906 for (i = 0; i < sav->sav_count; i++)
5907 nvlist_free(list[i]);
5908 kmem_free(list, sav->sav_count * sizeof (void *));
5909 }
5910
5911 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
5912 nvlist_free(nvroot);
5913
5914 sav->sav_sync = B_FALSE;
5915}
5916
5917static void
5918spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
5919{
5920 nvlist_t *config;
5921
b128c09f 5922 if (list_is_empty(&spa->spa_config_dirty_list))
34dc7c2f
BB
5923 return;
5924
b128c09f
BB
5925 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5926
5927 config = spa_config_generate(spa, spa->spa_root_vdev,
5928 dmu_tx_get_txg(tx), B_FALSE);
5929
ea0b2538
GW
5930 /*
5931 * If we're upgrading the spa version then make sure that
5932 * the config object gets updated with the correct version.
5933 */
5934 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
5935 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5936 spa->spa_uberblock.ub_version);
5937
b128c09f 5938 spa_config_exit(spa, SCL_STATE, FTAG);
34dc7c2f
BB
5939
5940 if (spa->spa_config_syncing)
5941 nvlist_free(spa->spa_config_syncing);
5942 spa->spa_config_syncing = config;
5943
5944 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5945}
5946
9ae529ec 5947static void
13fe0198 5948spa_sync_version(void *arg, dmu_tx_t *tx)
9ae529ec 5949{
13fe0198
MA
5950 uint64_t *versionp = arg;
5951 uint64_t version = *versionp;
5952 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9ae529ec
CS
5953
5954 /*
5955 * Setting the version is special cased when first creating the pool.
5956 */
5957 ASSERT(tx->tx_txg != TXG_INITIAL);
5958
8dca0a9a 5959 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
9ae529ec
CS
5960 ASSERT(version >= spa_version(spa));
5961
5962 spa->spa_uberblock.ub_version = version;
5963 vdev_config_dirty(spa->spa_root_vdev);
6f1ffb06 5964 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
9ae529ec
CS
5965}
5966
34dc7c2f
BB
5967/*
5968 * Set zpool properties.
5969 */
5970static void
13fe0198 5971spa_sync_props(void *arg, dmu_tx_t *tx)
34dc7c2f 5972{
13fe0198
MA
5973 nvlist_t *nvp = arg;
5974 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
34dc7c2f 5975 objset_t *mos = spa->spa_meta_objset;
9ae529ec 5976 nvpair_t *elem = NULL;
b128c09f
BB
5977
5978 mutex_enter(&spa->spa_props_lock);
34dc7c2f 5979
34dc7c2f 5980 while ((elem = nvlist_next_nvpair(nvp, elem))) {
9ae529ec
CS
5981 uint64_t intval;
5982 char *strval, *fname;
5983 zpool_prop_t prop;
5984 const char *propname;
5985 zprop_type_t proptype;
fa86b5db 5986 spa_feature_t fid;
9ae529ec
CS
5987
5988 prop = zpool_name_to_prop(nvpair_name(elem));
5989 switch ((int)prop) {
5990 case ZPROP_INVAL:
5991 /*
5992 * We checked this earlier in spa_prop_validate().
5993 */
5994 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5995
5996 fname = strchr(nvpair_name(elem), '@') + 1;
fa86b5db 5997 VERIFY0(zfeature_lookup_name(fname, &fid));
9ae529ec 5998
fa86b5db 5999 spa_feature_enable(spa, fid, tx);
6f1ffb06
MA
6000 spa_history_log_internal(spa, "set", tx,
6001 "%s=enabled", nvpair_name(elem));
9ae529ec
CS
6002 break;
6003
34dc7c2f 6004 case ZPOOL_PROP_VERSION:
93cf2076 6005 intval = fnvpair_value_uint64(elem);
34dc7c2f 6006 /*
9ae529ec
CS
6007 * The version is synced seperatly before other
6008 * properties and should be correct by now.
34dc7c2f 6009 */
9ae529ec 6010 ASSERT3U(spa_version(spa), >=, intval);
34dc7c2f
BB
6011 break;
6012
6013 case ZPOOL_PROP_ALTROOT:
6014 /*
6015 * 'altroot' is a non-persistent property. It should
6016 * have been set temporarily at creation or import time.
6017 */
6018 ASSERT(spa->spa_root != NULL);
6019 break;
6020
572e2857 6021 case ZPOOL_PROP_READONLY:
34dc7c2f
BB
6022 case ZPOOL_PROP_CACHEFILE:
6023 /*
572e2857
BB
6024 * 'readonly' and 'cachefile' are also non-persisitent
6025 * properties.
34dc7c2f 6026 */
34dc7c2f 6027 break;
d96eb2b1 6028 case ZPOOL_PROP_COMMENT:
93cf2076 6029 strval = fnvpair_value_string(elem);
d96eb2b1
DM
6030 if (spa->spa_comment != NULL)
6031 spa_strfree(spa->spa_comment);
6032 spa->spa_comment = spa_strdup(strval);
6033 /*
6034 * We need to dirty the configuration on all the vdevs
6035 * so that their labels get updated. It's unnecessary
6036 * to do this for pool creation since the vdev's
6037 * configuratoin has already been dirtied.
6038 */
6039 if (tx->tx_txg != TXG_INITIAL)
6040 vdev_config_dirty(spa->spa_root_vdev);
6f1ffb06
MA
6041 spa_history_log_internal(spa, "set", tx,
6042 "%s=%s", nvpair_name(elem), strval);
d96eb2b1 6043 break;
34dc7c2f
BB
6044 default:
6045 /*
6046 * Set pool property values in the poolprops mos object.
6047 */
34dc7c2f 6048 if (spa->spa_pool_props_object == 0) {
9ae529ec
CS
6049 spa->spa_pool_props_object =
6050 zap_create_link(mos, DMU_OT_POOL_PROPS,
34dc7c2f 6051 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
9ae529ec 6052 tx);
34dc7c2f 6053 }
34dc7c2f
BB
6054
6055 /* normalize the property name */
6056 propname = zpool_prop_to_name(prop);
6057 proptype = zpool_prop_get_type(prop);
6058
6059 if (nvpair_type(elem) == DATA_TYPE_STRING) {
6060 ASSERT(proptype == PROP_TYPE_STRING);
93cf2076
GW
6061 strval = fnvpair_value_string(elem);
6062 VERIFY0(zap_update(mos,
34dc7c2f 6063 spa->spa_pool_props_object, propname,
93cf2076 6064 1, strlen(strval) + 1, strval, tx));
6f1ffb06
MA
6065 spa_history_log_internal(spa, "set", tx,
6066 "%s=%s", nvpair_name(elem), strval);
34dc7c2f 6067 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
93cf2076 6068 intval = fnvpair_value_uint64(elem);
34dc7c2f
BB
6069
6070 if (proptype == PROP_TYPE_INDEX) {
6071 const char *unused;
93cf2076
GW
6072 VERIFY0(zpool_prop_index_to_string(
6073 prop, intval, &unused));
34dc7c2f 6074 }
93cf2076 6075 VERIFY0(zap_update(mos,
34dc7c2f 6076 spa->spa_pool_props_object, propname,
93cf2076 6077 8, 1, &intval, tx));
6f1ffb06
MA
6078 spa_history_log_internal(spa, "set", tx,
6079 "%s=%lld", nvpair_name(elem), intval);
34dc7c2f
BB
6080 } else {
6081 ASSERT(0); /* not allowed */
6082 }
6083
6084 switch (prop) {
6085 case ZPOOL_PROP_DELEGATION:
6086 spa->spa_delegation = intval;
6087 break;
6088 case ZPOOL_PROP_BOOTFS:
6089 spa->spa_bootfs = intval;
6090 break;
6091 case ZPOOL_PROP_FAILUREMODE:
6092 spa->spa_failmode = intval;
6093 break;
9babb374
BB
6094 case ZPOOL_PROP_AUTOEXPAND:
6095 spa->spa_autoexpand = intval;
428870ff
BB
6096 if (tx->tx_txg != TXG_INITIAL)
6097 spa_async_request(spa,
6098 SPA_ASYNC_AUTOEXPAND);
6099 break;
6100 case ZPOOL_PROP_DEDUPDITTO:
6101 spa->spa_dedup_ditto = intval;
9babb374 6102 break;
34dc7c2f
BB
6103 default:
6104 break;
6105 }
6106 }
6107
34dc7c2f 6108 }
b128c09f
BB
6109
6110 mutex_exit(&spa->spa_props_lock);
34dc7c2f
BB
6111}
6112
428870ff
BB
6113/*
6114 * Perform one-time upgrade on-disk changes. spa_version() does not
6115 * reflect the new version this txg, so there must be no changes this
6116 * txg to anything that the upgrade code depends on after it executes.
6117 * Therefore this must be called after dsl_pool_sync() does the sync
6118 * tasks.
6119 */
6120static void
6121spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6122{
6123 dsl_pool_t *dp = spa->spa_dsl_pool;
6124
6125 ASSERT(spa->spa_sync_pass == 1);
6126
13fe0198
MA
6127 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6128
428870ff
BB
6129 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6130 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6131 dsl_pool_create_origin(dp, tx);
6132
6133 /* Keeping the origin open increases spa_minref */
6134 spa->spa_minref += 3;
6135 }
6136
6137 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6138 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6139 dsl_pool_upgrade_clones(dp, tx);
6140 }
6141
6142 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6143 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6144 dsl_pool_upgrade_dir_clones(dp, tx);
6145
6146 /* Keeping the freedir open increases spa_minref */
6147 spa->spa_minref += 3;
6148 }
9ae529ec
CS
6149
6150 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6151 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6152 spa_feature_create_zap_objects(spa, tx);
6153 }
13fe0198 6154 rrw_exit(&dp->dp_config_rwlock, FTAG);
428870ff
BB
6155}
6156
34dc7c2f
BB
6157/*
6158 * Sync the specified transaction group. New blocks may be dirtied as
6159 * part of the process, so we iterate until it converges.
6160 */
6161void
6162spa_sync(spa_t *spa, uint64_t txg)
6163{
6164 dsl_pool_t *dp = spa->spa_dsl_pool;
6165 objset_t *mos = spa->spa_meta_objset;
428870ff 6166 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
34dc7c2f
BB
6167 vdev_t *rvd = spa->spa_root_vdev;
6168 vdev_t *vd;
34dc7c2f 6169 dmu_tx_t *tx;
b128c09f 6170 int error;
d6320ddb 6171 int c;
34dc7c2f 6172
572e2857
BB
6173 VERIFY(spa_writeable(spa));
6174
34dc7c2f
BB
6175 /*
6176 * Lock out configuration changes.
6177 */
b128c09f 6178 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f
BB
6179
6180 spa->spa_syncing_txg = txg;
6181 spa->spa_sync_pass = 0;
6182
b128c09f
BB
6183 /*
6184 * If there are any pending vdev state changes, convert them
6185 * into config changes that go out with this transaction group.
6186 */
6187 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
fb5f0bc8
BB
6188 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6189 /*
6190 * We need the write lock here because, for aux vdevs,
6191 * calling vdev_config_dirty() modifies sav_config.
6192 * This is ugly and will become unnecessary when we
6193 * eliminate the aux vdev wart by integrating all vdevs
6194 * into the root vdev tree.
6195 */
6196 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6197 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6198 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6199 vdev_state_clean(vd);
6200 vdev_config_dirty(vd);
6201 }
6202 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6203 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
b128c09f
BB
6204 }
6205 spa_config_exit(spa, SCL_STATE, FTAG);
6206
34dc7c2f
BB
6207 tx = dmu_tx_create_assigned(dp, txg);
6208
cc92e9d0
GW
6209 spa->spa_sync_starttime = gethrtime();
6210 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
6211 spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
cbfa294d 6212 spa_deadman, spa, TQ_PUSHPAGE, ddi_get_lbolt() +
cc92e9d0
GW
6213 NSEC_TO_TICK(spa->spa_deadman_synctime));
6214
34dc7c2f
BB
6215 /*
6216 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6217 * set spa_deflate if we have no raid-z vdevs.
6218 */
6219 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6220 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6221 int i;
6222
6223 for (i = 0; i < rvd->vdev_children; i++) {
6224 vd = rvd->vdev_child[i];
6225 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6226 break;
6227 }
6228 if (i == rvd->vdev_children) {
6229 spa->spa_deflate = TRUE;
6230 VERIFY(0 == zap_add(spa->spa_meta_objset,
6231 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6232 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6233 }
6234 }
6235
6236 /*
428870ff
BB
6237 * If anything has changed in this txg, or if someone is waiting
6238 * for this txg to sync (eg, spa_vdev_remove()), push the
6239 * deferred frees from the previous txg. If not, leave them
6240 * alone so that we don't generate work on an otherwise idle
6241 * system.
34dc7c2f
BB
6242 */
6243 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
6244 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
428870ff
BB
6245 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
6246 ((dsl_scan_active(dp->dp_scan) ||
6247 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
e8b96c60 6248 spa_sync_deferred_frees(spa, tx);
428870ff 6249 }
34dc7c2f
BB
6250
6251 /*
6252 * Iterate to convergence.
6253 */
6254 do {
428870ff 6255 int pass = ++spa->spa_sync_pass;
34dc7c2f
BB
6256
6257 spa_sync_config_object(spa, tx);
6258 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6259 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6260 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6261 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6262 spa_errlog_sync(spa, txg);
6263 dsl_pool_sync(dp, txg);
6264
55d85d5a 6265 if (pass < zfs_sync_pass_deferred_free) {
e8b96c60 6266 spa_sync_frees(spa, free_bpl, tx);
428870ff
BB
6267 } else {
6268 bplist_iterate(free_bpl, bpobj_enqueue_cb,
e8b96c60 6269 &spa->spa_deferred_bpobj, tx);
34dc7c2f
BB
6270 }
6271
428870ff
BB
6272 ddt_sync(spa, txg);
6273 dsl_scan_sync(dp, tx);
34dc7c2f 6274
c65aa5b2 6275 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
428870ff
BB
6276 vdev_sync(vd, txg);
6277
6278 if (pass == 1)
6279 spa_sync_upgrades(spa, tx);
34dc7c2f 6280
428870ff 6281 } while (dmu_objset_is_dirty(mos, txg));
34dc7c2f
BB
6282
6283 /*
6284 * Rewrite the vdev configuration (which includes the uberblock)
6285 * to commit the transaction group.
6286 *
6287 * If there are no dirty vdevs, we sync the uberblock to a few
6288 * random top-level vdevs that are known to be visible in the
b128c09f
BB
6289 * config cache (see spa_vdev_add() for a complete description).
6290 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
34dc7c2f 6291 */
b128c09f
BB
6292 for (;;) {
6293 /*
6294 * We hold SCL_STATE to prevent vdev open/close/etc.
6295 * while we're attempting to write the vdev labels.
6296 */
6297 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6298
6299 if (list_is_empty(&spa->spa_config_dirty_list)) {
6300 vdev_t *svd[SPA_DVAS_PER_BP];
6301 int svdcount = 0;
6302 int children = rvd->vdev_children;
6303 int c0 = spa_get_random(children);
b128c09f 6304
d6320ddb 6305 for (c = 0; c < children; c++) {
b128c09f
BB
6306 vd = rvd->vdev_child[(c0 + c) % children];
6307 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6308 continue;
6309 svd[svdcount++] = vd;
6310 if (svdcount == SPA_DVAS_PER_BP)
6311 break;
6312 }
9babb374
BB
6313 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6314 if (error != 0)
6315 error = vdev_config_sync(svd, svdcount, txg,
6316 B_TRUE);
b128c09f
BB
6317 } else {
6318 error = vdev_config_sync(rvd->vdev_child,
9babb374
BB
6319 rvd->vdev_children, txg, B_FALSE);
6320 if (error != 0)
6321 error = vdev_config_sync(rvd->vdev_child,
6322 rvd->vdev_children, txg, B_TRUE);
34dc7c2f 6323 }
34dc7c2f 6324
3bc7e0fb
GW
6325 if (error == 0)
6326 spa->spa_last_synced_guid = rvd->vdev_guid;
6327
b128c09f
BB
6328 spa_config_exit(spa, SCL_STATE, FTAG);
6329
6330 if (error == 0)
6331 break;
6332 zio_suspend(spa, NULL);
6333 zio_resume_wait(spa);
6334 }
34dc7c2f
BB
6335 dmu_tx_commit(tx);
6336
cc92e9d0
GW
6337 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
6338 spa->spa_deadman_tqid = 0;
6339
34dc7c2f
BB
6340 /*
6341 * Clear the dirty config list.
6342 */
b128c09f 6343 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
34dc7c2f
BB
6344 vdev_config_clean(vd);
6345
6346 /*
6347 * Now that the new config has synced transactionally,
6348 * let it become visible to the config cache.
6349 */
6350 if (spa->spa_config_syncing != NULL) {
6351 spa_config_set(spa, spa->spa_config_syncing);
6352 spa->spa_config_txg = txg;
6353 spa->spa_config_syncing = NULL;
6354 }
6355
34dc7c2f 6356 spa->spa_ubsync = spa->spa_uberblock;
34dc7c2f 6357
428870ff 6358 dsl_pool_sync_done(dp, txg);
34dc7c2f
BB
6359
6360 /*
6361 * Update usable space statistics.
6362 */
c65aa5b2 6363 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
34dc7c2f
BB
6364 vdev_sync_done(vd, txg);
6365
428870ff
BB
6366 spa_update_dspace(spa);
6367
34dc7c2f
BB
6368 /*
6369 * It had better be the case that we didn't dirty anything
6370 * since vdev_config_sync().
6371 */
6372 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
6373 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6374 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
428870ff
BB
6375
6376 spa->spa_sync_pass = 0;
34dc7c2f 6377
b128c09f 6378 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f 6379
428870ff
BB
6380 spa_handle_ignored_writes(spa);
6381
34dc7c2f
BB
6382 /*
6383 * If any async tasks have been requested, kick them off.
6384 */
6385 spa_async_dispatch(spa);
6386}
6387
6388/*
6389 * Sync all pools. We don't want to hold the namespace lock across these
6390 * operations, so we take a reference on the spa_t and drop the lock during the
6391 * sync.
6392 */
6393void
6394spa_sync_allpools(void)
6395{
6396 spa_t *spa = NULL;
6397 mutex_enter(&spa_namespace_lock);
6398 while ((spa = spa_next(spa)) != NULL) {
572e2857
BB
6399 if (spa_state(spa) != POOL_STATE_ACTIVE ||
6400 !spa_writeable(spa) || spa_suspended(spa))
34dc7c2f
BB
6401 continue;
6402 spa_open_ref(spa, FTAG);
6403 mutex_exit(&spa_namespace_lock);
6404 txg_wait_synced(spa_get_dsl(spa), 0);
6405 mutex_enter(&spa_namespace_lock);
6406 spa_close(spa, FTAG);
6407 }
6408 mutex_exit(&spa_namespace_lock);
6409}
6410
6411/*
6412 * ==========================================================================
6413 * Miscellaneous routines
6414 * ==========================================================================
6415 */
6416
6417/*
6418 * Remove all pools in the system.
6419 */
6420void
6421spa_evict_all(void)
6422{
6423 spa_t *spa;
6424
6425 /*
6426 * Remove all cached state. All pools should be closed now,
6427 * so every spa in the AVL tree should be unreferenced.
6428 */
6429 mutex_enter(&spa_namespace_lock);
6430 while ((spa = spa_next(NULL)) != NULL) {
6431 /*
6432 * Stop async tasks. The async thread may need to detach
6433 * a device that's been replaced, which requires grabbing
6434 * spa_namespace_lock, so we must drop it here.
6435 */
6436 spa_open_ref(spa, FTAG);
6437 mutex_exit(&spa_namespace_lock);
6438 spa_async_suspend(spa);
6439 mutex_enter(&spa_namespace_lock);
34dc7c2f
BB
6440 spa_close(spa, FTAG);
6441
6442 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6443 spa_unload(spa);
6444 spa_deactivate(spa);
6445 }
6446 spa_remove(spa);
6447 }
6448 mutex_exit(&spa_namespace_lock);
6449}
6450
6451vdev_t *
9babb374 6452spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
34dc7c2f 6453{
b128c09f
BB
6454 vdev_t *vd;
6455 int i;
6456
6457 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
6458 return (vd);
6459
9babb374 6460 if (aux) {
b128c09f
BB
6461 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
6462 vd = spa->spa_l2cache.sav_vdevs[i];
9babb374
BB
6463 if (vd->vdev_guid == guid)
6464 return (vd);
6465 }
6466
6467 for (i = 0; i < spa->spa_spares.sav_count; i++) {
6468 vd = spa->spa_spares.sav_vdevs[i];
b128c09f
BB
6469 if (vd->vdev_guid == guid)
6470 return (vd);
6471 }
6472 }
6473
6474 return (NULL);
34dc7c2f
BB
6475}
6476
6477void
6478spa_upgrade(spa_t *spa, uint64_t version)
6479{
572e2857
BB
6480 ASSERT(spa_writeable(spa));
6481
b128c09f 6482 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
6483
6484 /*
6485 * This should only be called for a non-faulted pool, and since a
6486 * future version would result in an unopenable pool, this shouldn't be
6487 * possible.
6488 */
8dca0a9a 6489 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
9b67f605 6490 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
34dc7c2f
BB
6491
6492 spa->spa_uberblock.ub_version = version;
6493 vdev_config_dirty(spa->spa_root_vdev);
6494
b128c09f 6495 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
6496
6497 txg_wait_synced(spa_get_dsl(spa), 0);
6498}
6499
6500boolean_t
6501spa_has_spare(spa_t *spa, uint64_t guid)
6502{
6503 int i;
6504 uint64_t spareguid;
6505 spa_aux_vdev_t *sav = &spa->spa_spares;
6506
6507 for (i = 0; i < sav->sav_count; i++)
6508 if (sav->sav_vdevs[i]->vdev_guid == guid)
6509 return (B_TRUE);
6510
6511 for (i = 0; i < sav->sav_npending; i++) {
6512 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6513 &spareguid) == 0 && spareguid == guid)
6514 return (B_TRUE);
6515 }
6516
6517 return (B_FALSE);
6518}
6519
b128c09f
BB
6520/*
6521 * Check if a pool has an active shared spare device.
6522 * Note: reference count of an active spare is 2, as a spare and as a replace
6523 */
6524static boolean_t
6525spa_has_active_shared_spare(spa_t *spa)
6526{
6527 int i, refcnt;
6528 uint64_t pool;
6529 spa_aux_vdev_t *sav = &spa->spa_spares;
6530
6531 for (i = 0; i < sav->sav_count; i++) {
6532 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6533 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6534 refcnt > 2)
6535 return (B_TRUE);
6536 }
6537
6538 return (B_FALSE);
6539}
6540
34dc7c2f 6541/*
26685276 6542 * Post a FM_EREPORT_ZFS_* event from sys/fm/fs/zfs.h. The payload will be
34dc7c2f
BB
6543 * filled in from the spa and (optionally) the vdev. This doesn't do anything
6544 * in the userland libzpool, as we don't want consumers to misinterpret ztest
6545 * or zdb as real changes.
6546 */
6547void
6548spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6549{
6550#ifdef _KERNEL
26685276 6551 zfs_ereport_post(name, spa, vd, NULL, 0, 0);
34dc7c2f
BB
6552#endif
6553}
c28b2279
BB
6554
6555#if defined(_KERNEL) && defined(HAVE_SPL)
6556/* state manipulation functions */
6557EXPORT_SYMBOL(spa_open);
6558EXPORT_SYMBOL(spa_open_rewind);
6559EXPORT_SYMBOL(spa_get_stats);
6560EXPORT_SYMBOL(spa_create);
6561EXPORT_SYMBOL(spa_import_rootpool);
6562EXPORT_SYMBOL(spa_import);
6563EXPORT_SYMBOL(spa_tryimport);
6564EXPORT_SYMBOL(spa_destroy);
6565EXPORT_SYMBOL(spa_export);
6566EXPORT_SYMBOL(spa_reset);
6567EXPORT_SYMBOL(spa_async_request);
6568EXPORT_SYMBOL(spa_async_suspend);
6569EXPORT_SYMBOL(spa_async_resume);
6570EXPORT_SYMBOL(spa_inject_addref);
6571EXPORT_SYMBOL(spa_inject_delref);
6572EXPORT_SYMBOL(spa_scan_stat_init);
6573EXPORT_SYMBOL(spa_scan_get_stats);
6574
6575/* device maniion */
6576EXPORT_SYMBOL(spa_vdev_add);
6577EXPORT_SYMBOL(spa_vdev_attach);
6578EXPORT_SYMBOL(spa_vdev_detach);
6579EXPORT_SYMBOL(spa_vdev_remove);
6580EXPORT_SYMBOL(spa_vdev_setpath);
6581EXPORT_SYMBOL(spa_vdev_setfru);
6582EXPORT_SYMBOL(spa_vdev_split_mirror);
6583
6584/* spare statech is global across all pools) */
6585EXPORT_SYMBOL(spa_spare_add);
6586EXPORT_SYMBOL(spa_spare_remove);
6587EXPORT_SYMBOL(spa_spare_exists);
6588EXPORT_SYMBOL(spa_spare_activate);
6589
6590/* L2ARC statech is global across all pools) */
6591EXPORT_SYMBOL(spa_l2cache_add);
6592EXPORT_SYMBOL(spa_l2cache_remove);
6593EXPORT_SYMBOL(spa_l2cache_exists);
6594EXPORT_SYMBOL(spa_l2cache_activate);
6595EXPORT_SYMBOL(spa_l2cache_drop);
6596
6597/* scanning */
6598EXPORT_SYMBOL(spa_scan);
6599EXPORT_SYMBOL(spa_scan_stop);
6600
6601/* spa syncing */
6602EXPORT_SYMBOL(spa_sync); /* only for DMU use */
6603EXPORT_SYMBOL(spa_sync_allpools);
6604
6605/* properties */
6606EXPORT_SYMBOL(spa_prop_set);
6607EXPORT_SYMBOL(spa_prop_get);
6608EXPORT_SYMBOL(spa_prop_clear_bootfs);
6609
6610/* asynchronous event notification */
6611EXPORT_SYMBOL(spa_event_notify);
6612#endif