/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>

#ifdef _KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif /* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_ONLINE_PERCENT,	/* value is % of online CPUs */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case,
 * the particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_BATCH,	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(16),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(4, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

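/*
 * An illustrative reading of the table above: the FREE row's ZTI_P(4, 8)
 * entry expands to eight ISSUE taskqs of four threads each, with the
 * dispatch code picking one of the eight at random, while the WRITE row's
 * ZTI_BATCH entry creates a single throughput-oriented taskq whose thread
 * count is a percentage of the online CPUs (zio_taskq_batch_pct).
 */
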
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 100;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
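
/*
 * Resulting nvlist shape, sketched for a string-valued property (the
 * uint64 intval is stored under ZPROP_VALUE instead when strval is NULL):
 *
 *	nvl = {
 *		"<propname>" = { ZPROP_SOURCE = <src>, ZPROP_VALUE = "<strval>" }
 *	}
 */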

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size;
	uint64_t alloc;
	uint64_t space;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	int c;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		space = 0;
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			space += tvd->vdev_max_asize - tvd->vdev_asize;
		}
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
		    src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		dsl_dir_t *freedir = pool->dp_free_dir;

		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir
		 * will be NULL.
		 */
		if (freedir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    freedir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE);
	if (err)
		return (err);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		goto out;
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if ((err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds))) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_PUSHPAGE);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_PUSHPAGE);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch ((int)prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if ((error = dmu_objset_hold(strval, FTAG,
				    &os)))
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			/*
			 * Note: the loop increment belongs only in the for
			 * statement; a second increment in the body would
			 * skip every other character of the comment.
			 */
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = SET_ERROR(E2BIG);
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;

		default:
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
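
/*
 * Illustrative (hypothetical) input for the validation above: a props
 * nvlist from "zpool set" might contain
 *
 *	{ "comment" = "rack 12, shelf 3", "feature@async_destroy" = 0 }
 *
 * Feature properties are named "feature@<name>", must be DATA_TYPE_UINT64,
 * and must carry the value 0 (enable); anything else fails with EINVAL.
 */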

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_PUSHPAGE);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver, 6);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;
	ASSERTV(uint64_t *newguid = arg);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);

	return (error);
}
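
/*
 * Usage note: this is the kernel side of "zpool reguid"; the check/sync
 * pair above runs as a single dsl_sync_task, so the GUID swap commits
 * atomically with the txg that carries it.
 */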

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t i, flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	for (i = 0; i < count; i++) {
		taskq_t *tq;

		switch (mode) {
		case ZTI_MODE_FIXED:
			ASSERT3U(value, >=, 1);
			value = MAX(value, 1);
			break;

		case ZTI_MODE_BATCH:
			batch = B_TRUE;
			flags |= TASKQ_THREADS_CPU_PCT;
			value = zio_taskq_batch_pct;
			break;

		case ZTI_MODE_ONLINE_PERCENT:
			flags |= TASKQ_THREADS_CPU_PCT;
			break;

		default:
			panic("unrecognized mode for %s_%s taskq (%u:%u) in "
			    "spa_activate()",
			    zio_type_name[t], zio_taskq_types[q], mode, value);
			break;
		}

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			tq = taskq_create_proc(name, value, maxclsyspri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}
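
/*
 * Naming sketch: combined with the conventional zio_type_name[] strings
 * (e.g. "z_wr"), a multi-taskq WRITE/ISSUE configuration yields taskqs
 * named "z_wr_iss_0", "z_wr_iss_1", ..., while a single-taskq
 * configuration yields just "z_wr_iss".
 */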

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	uint_t i;

	if (tqs->stqs_taskq == NULL) {
		ASSERT3U(tqs->stqs_count, ==, 0);
		return;
	}

	for (i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;
	taskqid_t id;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	id = taskq_dispatch(tq, func, arg, flags);
	if (id)
		taskq_wait_id(tq, id);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	int t, q;

	for (t = 0; t < ZIO_TYPES; t++) {
		for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef HAVE_SPA_THREAD
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif /* HAVE_SPA_THREAD */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t, q;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);

	for (t = 0; t < ZIO_TYPES; t++) {
		for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;
	int c;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
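
/*
 * Shape sketch (illustrative): the config nvlist mirrors the vdev tree,
 * e.g. a two-way mirror arrives roughly as
 *
 *	{ "type" = "root", children = [
 *	    { "type" = "mirror", children = [
 *		{ "type" = "disk", ... }, { "type" = "disk", ... } ] } ] }
 *
 * and the recursion above allocates one vdev_t per nvlist node, bottoming
 * out at leaf vdevs (vdev_op_leaf).
 */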

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_PUSHPAGE);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_PUSHPAGE);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_PUSHPAGE);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error)
		return (error);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_PUSHPAGE | KM_NODEBUG);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_ereport_post(FM_EREPORT_RESOURCE_AUTOREPLACE,
		    vd->vdev_spa, vd, NULL, 0, 0);
		spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_CHECK);
	}
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv;
	int c, i;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing devices in this config.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_PUSHPAGE);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);

		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			vdev_t *mtvd = mrvd->vdev_child[c];

			if (tvd->vdev_ops == &vdev_missing_ops &&
			    mtvd->vdev_ops != &vdev_missing_ops &&
			    mtvd->vdev_islog)
				child[idx++] = vdev_config_generate(spa, mtvd,
				    B_FALSE, 0);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

			for (i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));
	}

	/*
	 * Compare the root vdev tree with the information we have
	 * from the MOS config (mrvd). Check each top-level vdev
	 * with the corresponding MOS config top-level (mtvd).
	 */
	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		vdev_t *mtvd = mrvd->vdev_child[c];

		/*
		 * Resolve any "missing" vdevs in the current configuration.
		 * If we find that the MOS config has more accurate information
		 * about the top-level vdev then use that vdev instead.
		 */
		if (tvd->vdev_ops == &vdev_missing_ops &&
		    mtvd->vdev_ops != &vdev_missing_ops) {

			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
				continue;

			/*
			 * Device specific actions.
			 */
			if (mtvd->vdev_islog) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
			} else {
				/*
				 * XXX - once we have 'readonly' pool
				 * support we should be able to handle
				 * missing data devices by transitioning
				 * the pool to readonly.
				 */
				continue;
			}

			/*
			 * Swap the missing vdev with the data we were
			 * able to obtain from the MOS config.
			 */
			vdev_remove_child(rvd, tvd);
			vdev_remove_child(mrvd, mtvd);

			vdev_add_child(rvd, mtvd);
			vdev_add_child(mrvd, tvd);

			spa_config_exit(spa, SCL_ALL, FTAG);
			vdev_load(mtvd);
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

			vdev_reopen(rvd);
		} else if (mtvd->vdev_islog) {
			/*
			 * Load the slog device's state from the MOS config
			 * since it's possible that the label does not
			 * contain the most up-to-date information.
			 */
			vdev_load_log_state(tvd, mtvd);
			vdev_reopen(tvd);
		}
	}
	vdev_free(mrvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Ensure we were able to validate the config.
	 */
	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;

	switch (spa->spa_log_state) {
	default:
		break;
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
		    NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;
	int c;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_offline_log(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	int i;

	for (i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_add_64(&sle->sle_meta_count, 1);
		else
			atomic_add_64(&sle->sle_data_count, 1);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);
}

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (bp != NULL) {
		zio_t *rio = arg;
		size_t size = BP_GET_PSIZE(bp);
		void *data = zio_data_buf_alloc(size);

		zio_nowait(zio_read(rio, spa, bp, data, size,
		    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
		    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
		    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	}
	return (0);
}
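
/*
 * Editor's note: the callback issues every read as a child of the root
 * zio ("rio") passed in as arg, via zio_nowait().  spa_load_verify()
 * below therefore only has to zio_wait() on the root; the zio
 * framework holds it open until all children complete, and each
 * child's done routine (spa_load_verify_done) tallies failures into
 * the shared spa_load_error_t with atomic adds.
 */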

static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error;

	zpool_get_rewind_policy(spa->spa_config, &policy);

	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
		return (0);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	error = traverse_pool(spa, spa->spa_verify_min_txg,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}
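
/*
 * Editor's note: "verify_ok" requires both tallies to stay within the
 * rewind policy's caps (zrp_maxmeta for metadata blocks, zrp_maxdata
 * for plain data).  On success the function also records how far a
 * rewind would roll the clock back (ZPOOL_CONFIG_REWIND_TIME); on
 * failure it pulls spa_load_max_txg back to the current uberblock's
 * txg so that spa_load_best() can retry from an earlier txg.
 */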

/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, sizeof (uint64_t), 1, val));
}

static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
	return (err);
}
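
/*
 * Editor's note: the three helpers above carry most of spa_load_impl()
 * below.  spa_dir_prop() returns the raw zap_lookup() error so callers
 * can treat ENOENT (older pool, object simply absent) differently from
 * real corruption, and spa_vdev_err() stamps the root vdev
 * VDEV_STATE_CANT_OPEN with an aux code before handing back the errno,
 * giving the ubiquitous pattern:
 *
 *	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &obj) != 0)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 */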

/*
 * Fix up config after a partly-completed split.  This is done with the
 * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
 * pool have that entry in their config, but only the splitting one contains
 * a list of all the guids of the vdevs that are being split off.
 *
 * This function determines what to do with that list: either rejoin
 * all the disks to the pool, or complete the splitting process.  To attempt
 * the rejoin, each offline disk is marked online again, and
 * we do a reopen() call.  If the vdev label for every disk that was
 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
 * then we call vdev_split() on each disk, and complete the split.
 *
 * Otherwise we leave the config alone, with all the vdevs in place in
 * the original pool.
 */
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
	uint_t extracted;
	uint64_t *glist;
	uint_t i, gcount;
	nvlist_t *nvl;
	vdev_t **vd;
	boolean_t attempt_reopen;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
		return;

	/* check that the config is complete */
	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    &glist, &gcount) != 0)
		return;

	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_PUSHPAGE);

	/* attempt to online all the vdevs & validate */
	attempt_reopen = B_TRUE;
	for (i = 0; i < gcount; i++) {
		if (glist[i] == 0)	/* vdev is hole */
			continue;

		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
		if (vd[i] == NULL) {
			/*
			 * Don't bother attempting to reopen the disks;
			 * just do the split.
			 */
			attempt_reopen = B_FALSE;
		} else {
			/* attempt to re-online it */
			vd[i]->vdev_offline = B_FALSE;
		}
	}

	if (attempt_reopen) {
		vdev_reopen(spa->spa_root_vdev);

		/* check each device to see what state it's in */
		for (extracted = 0, i = 0; i < gcount; i++) {
			if (vd[i] != NULL &&
			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
				break;
			++extracted;
		}
	}

	/*
	 * If every disk has been moved to the new pool, or if we never
	 * even attempted to look at them, then we split them off for
	 * good.
	 */
	if (!attempt_reopen || gcount == extracted) {
		for (i = 0; i < gcount; i++)
			if (vd[i] != NULL)
				vdev_split(vd[i]);
		vdev_reopen(spa->spa_root_vdev);
	}

	kmem_free(vd, gcount * sizeof (vdev_t *));
}

static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
    boolean_t mosconfig)
{
	nvlist_t *config = spa->spa_config;
	char *ereport = FM_EREPORT_ZFS_POOL;
	char *comment;
	int error;
	uint64_t pool_guid;
	nvlist_t *nvl;

	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
		return (SET_ERROR(EINVAL));

	ASSERT(spa->spa_comment == NULL);
	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
		spa->spa_comment = spa_strdup(comment);

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &spa->spa_ubsync.ub_version) != 0)
		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = SET_ERROR(EEXIST);
	} else {
		spa->spa_config_guid = pool_guid;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
		    &nvl) == 0) {
			VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
			    KM_PUSHPAGE) == 0);
		}

		nvlist_free(spa->spa_load_info);
		spa->spa_load_info = fnvlist_alloc();

		gethrestime(&spa->spa_loaded_ts);
		error = spa_load_impl(spa, pool_guid, config, state, type,
		    mosconfig, &ereport);
	}

	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error) {
		if (error != EEXIST) {
			spa->spa_loaded_ts.tv_sec = 0;
			spa->spa_loaded_ts.tv_nsec = 0;
		}
		if (error != EBADF) {
			zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
		}
	}
	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
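
/*
 * Editor's note: spa_load() and spa_load_impl() can recurse exactly
 * once.  The first pass may run with mosconfig == B_FALSE (an
 * untrusted cached config, so the pool is forced read-only); once the
 * trusted copy has been read from the MOS, spa_load_impl() installs it
 * with spa_config_set() and re-enters spa_load() with
 * mosconfig == B_TRUE.
 */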

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
__attribute__((always_inline))
static inline int
spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	nvlist_t *label;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t children, config_cache_txg = spa->spa_config_txg;
	int orig_mode = spa->spa_mode;
	int parse;
	uint64_t obj;
	boolean_t missing_feat_write = B_FALSE;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
		return (SET_ERROR(EINVAL));

	parse = (type == SPA_IMPORT_EXISTING ?
	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		return (error);

	ASSERT(spa->spa_root_vdev == rvd);

	if (type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_guid(spa) == pool_guid);
	}

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		return (error);

	/*
	 * We need to validate the vdev labels against the configuration that
	 * we have in hand, which is dependent on the setting of mosconfig.  If
	 * mosconfig is true then we're validating the vdev labels based on
	 * that config.  Otherwise, we're validating against the cached config
	 * (zpool.cache) that was read when we loaded the zfs module, and then
	 * later we will recursively call spa_load() and validate against
	 * the vdev config.
	 *
	 * If we're assembling a new pool that's been split off from an
	 * existing pool, the labels haven't yet been updated so we skip
	 * validation for now.
	 */
	if (type != SPA_IMPORT_ASSEMBLE) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		error = vdev_validate(rvd, mosconfig);
		spa_config_exit(spa, SCL_ALL, FTAG);

		if (error != 0)
			return (error);

		if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
			return (SET_ERROR(ENXIO));
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(rvd, ub, &label);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		nvlist_free(label);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
	}

	/*
	 * If the pool has an unsupported version we can't open it.
	 */
	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
		nvlist_free(label);
		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
	}

	if (ub->ub_version >= SPA_VERSION_FEATURES) {
		nvlist_t *features;

		/*
		 * If we weren't able to find what's necessary for reading the
		 * MOS in the label, return failure.
		 */
		if (label == NULL || nvlist_lookup_nvlist(label,
		    ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
			nvlist_free(label);
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
			    ENXIO));
		}

		/*
		 * Update our in-core representation with the definitive values
		 * from the label.
		 */
		nvlist_free(spa->spa_label_features);
		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
	}

	nvlist_free(label);

	/*
	 * Look through entries in the label nvlist's features_for_read.  If
	 * there is a feature listed there which we don't understand then we
	 * cannot open a pool.
	 */
	if (ub->ub_version >= SPA_VERSION_FEATURES) {
		nvlist_t *unsup_feat;
		nvpair_t *nvp;

		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);

		for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
		    nvp != NULL;
		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
			if (!zfeature_is_supported(nvpair_name(nvp))) {
				VERIFY(nvlist_add_string(unsup_feat,
				    nvpair_name(nvp), "") == 0);
			}
		}

		if (!nvlist_empty(unsup_feat)) {
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
			nvlist_free(unsup_feat);
			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
			    ENOTSUP));
		}

		nvlist_free(unsup_feat);
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.  We first check to see if the pool
	 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
	 * If it is, defer the vdev_guid_sum check till later so we
	 * can handle missing vdevs.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
	    &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
	    rvd->vdev_guid_sum != ub->ub_guid_sum)
		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));

	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_try_repair(spa, config);
		spa_config_exit(spa, SCL_ALL, FTAG);
		nvlist_free(spa->spa_config_splitting);
		spa->spa_config_splitting = NULL;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
	spa->spa_claim_max_txg = spa->spa_first_txg;
	spa->spa_prev_software_version = ub->ub_software_version;

	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
		boolean_t missing_feat_read = B_FALSE;
		nvlist_t *unsup_feat, *enabled_feat;

		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
		    &spa->spa_feat_for_read_obj) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
		    &spa->spa_feat_for_write_obj) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
		    &spa->spa_feat_desc_obj) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		enabled_feat = fnvlist_alloc();
		unsup_feat = fnvlist_alloc();

		if (!feature_is_supported(spa->spa_meta_objset,
		    spa->spa_feat_for_read_obj, spa->spa_feat_desc_obj,
		    unsup_feat, enabled_feat))
			missing_feat_read = B_TRUE;

		if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
			if (!feature_is_supported(spa->spa_meta_objset,
			    spa->spa_feat_for_write_obj, spa->spa_feat_desc_obj,
			    unsup_feat, enabled_feat)) {
				missing_feat_write = B_TRUE;
			}
		}

		fnvlist_add_nvlist(spa->spa_load_info,
		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);

		if (!nvlist_empty(unsup_feat)) {
			fnvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
		}

		fnvlist_free(enabled_feat);
		fnvlist_free(unsup_feat);

		if (!missing_feat_read) {
			fnvlist_add_boolean(spa->spa_load_info,
			    ZPOOL_CONFIG_CAN_RDONLY);
		}

		/*
		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
		 * twofold: to determine whether the pool is available for
		 * import in read-write mode and (if it is not) whether the
		 * pool is available for import in read-only mode.  If the pool
		 * is available for import in read-write mode, it is displayed
		 * as available in userland; if it is not available for import
		 * in read-only mode, it is displayed as unavailable in
		 * userland.  If the pool is available for import in read-only
		 * mode but not read-write mode, it is displayed as unavailable
		 * in userland with a special note that the pool is actually
		 * available for open in read-only mode.
		 *
		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
		 * missing a feature for write, we must first determine whether
		 * the pool can be opened read-only before returning to
		 * userland in order to know whether to display the
		 * above-mentioned note.
		 */
		if (missing_feat_read || (missing_feat_write &&
		    spa_writeable(spa))) {
			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
			    ENOTSUP));
		}
	}

	spa->spa_is_initializing = B_TRUE;
	error = dsl_pool_open(spa->spa_dsl_pool);
	spa->spa_is_initializing = B_FALSE;
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (!mosconfig) {
		uint64_t hostid;
		nvlist_t *policy = NULL, *nvconfig;

		if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(nvconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

#ifdef	_KERNEL
			myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
			/*
			 * We're emulating the system's hostid in userland, so
			 * we can't use zone_get_hostid().
			 */
			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */
			if (hostid != 0 && myhostid != 0 &&
			    hostid != myhostid) {
				nvlist_free(nvconfig);
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://zfsonlinux.org/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				return (SET_ERROR(EBADF));
			}
		}
		if (nvlist_lookup_nvlist(spa->spa_config,
		    ZPOOL_REWIND_POLICY, &policy) == 0)
			VERIFY(nvlist_add_nvlist(nvconfig,
			    ZPOOL_REWIND_POLICY, policy) == 0);

		spa_config_set(spa, nvconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
	}

	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
	    &spa->spa_creation_version);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
	    &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * If we're assembling the pool from the split-off vdevs of
	 * an existing pool, we don't want to attach the spares & cache
	 * devices.
	 */

	/*
	 * Load any hot spares for this pool.
	 */
	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	} else if (error == 0) {
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	} else if (error == 0) {
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
	if (error && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (error == 0) {
		uint64_t autoreplace;

		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
		spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
		    &spa->spa_dedup_ditto);

		spa->spa_autoreplace = (autoreplace != 0);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
		spa_check_removed(spa->spa_root_vdev);
		/*
		 * For the import case, this is done in spa_import(), because
		 * at this point we're using the spare definitions from
		 * the MOS config, not necessarily from the userland config.
		 */
		if (state != SPA_LOAD_IMPORT) {
			spa_aux_check_removed(&spa->spa_spares);
			spa_aux_check_removed(&spa->spa_l2cache);
		}
	}

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Load the DDTs (dedup tables).
	 */
	error = ddt_load(spa);
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	spa_update_dspace(spa);

	/*
	 * Validate the config, using the MOS config to fill in any
	 * information which might be missing.  If we fail to validate
	 * the config then declare the pool unfit for use.  If we're
	 * assembling a pool from a split, the log is not transferred
	 * over.
	 */
	if (type != SPA_IMPORT_ASSEMBLE) {
		nvlist_t *nvconfig;

		if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

		if (!spa_config_valid(spa, nvconfig)) {
			nvlist_free(nvconfig);
			return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
			    ENXIO));
		}
		nvlist_free(nvconfig);

		/*
		 * Now that we've validated the config, check the state of the
		 * root vdev.  If it can't be opened, it indicates one or
		 * more toplevel vdevs are faulted.
		 */
		if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
			return (SET_ERROR(ENXIO));

		if (spa_check_logs(spa)) {
			*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
			return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
		}
	}

	if (missing_feat_write) {
		ASSERT(state == SPA_LOAD_TRYIMPORT);

		/*
		 * At this point, we know that we can open the pool in
		 * read-only mode but not read-write mode.  We now have enough
		 * information and can return to userland.
		 */
		return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
	}

	/*
	 * We've successfully opened the pool, verify that we're ready
	 * to start pushing transactions.
	 */
	if (state != SPA_LOAD_TRYIMPORT) {
		if ((error = spa_load_verify(spa)))
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
			    error));
	}

	if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
	    spa->spa_load_max_txg == UINT64_MAX)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
		 * invoked from zil_claim_log_block()'s i/o done callback.
		 * Price of rollback is that we abandon the log.
		 */
		spa->spa_claiming = B_TRUE;

		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_claiming = B_FALSE;

		spa_set_log_state(spa, SPA_LOG_GOOD);
		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.  We sync up to the highest
		 * claimed log block birth time so that claimed log blocks
		 * don't appear to be from the future.  spa_claim_max_txg
		 * will have been set for us by either zil_check_log_chain()
		 * (invoked from spa_check_logs()) or zil_claim() above.
		 */
		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 *
		 * If this is a verbatim import, trust the current
		 * in-core spa_config and update the disk labels.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT ||
		    state == SPA_LOAD_RECOVER ||
		    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
		    vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);

		/*
		 * Log the fact that we booted up (so that we can detect if
		 * we rebooted in the middle of an operation).
		 */
		spa_history_log_version(spa, "open");

		/*
		 * Delete any inconsistent datasets.
		 */
		(void) dmu_objset_find(spa_name(spa),
		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);

		/*
		 * Clean up any stale temporary dataset userrefs.
		 */
		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
	}

	return (0);
}

static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
	int mode = spa->spa_mode;

	spa_unload(spa);
	spa_deactivate(spa);

	spa->spa_load_max_txg--;

	spa_activate(spa, mode);
	spa_async_suspend(spa);

	return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
}

/*
 * If spa_load() fails this function will try loading prior txgs.  If
 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
 * will be rewound to that txg.  If 'state' is not SPA_LOAD_RECOVER this
 * function will not rewind the pool and will return the same error as
 * spa_load().
 */
static int
spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
    uint64_t max_request, int rewind_flags)
{
	nvlist_t *loadinfo = NULL;
	nvlist_t *config = NULL;
	int load_error, rewind_error;
	uint64_t safe_rewind_txg;
	uint64_t min_txg;

	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
		spa->spa_load_max_txg = spa->spa_load_txg;
		spa_set_log_state(spa, SPA_LOG_CLEAR);
	} else {
		spa->spa_load_max_txg = max_request;
	}

	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
	    mosconfig);
	if (load_error == 0)
		return (0);

	if (spa->spa_root_vdev != NULL)
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;

	if (rewind_flags & ZPOOL_NEVER_REWIND) {
		nvlist_free(config);
		return (load_error);
	}

	if (state == SPA_LOAD_RECOVER) {
		/* Price of rolling back is discarding txgs, including log */
		spa_set_log_state(spa, SPA_LOG_CLEAR);
	} else {
		/*
		 * If we aren't rolling back save the load info from our first
		 * import attempt so that we can restore it after attempting
		 * to rewind.
		 */
		loadinfo = spa->spa_load_info;
		spa->spa_load_info = fnvlist_alloc();
	}

	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
	    TXG_INITIAL : safe_rewind_txg;

	/*
	 * Continue as long as we're finding errors, we're still within
	 * the acceptable rewind range, and we're still finding uberblocks
	 */
	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
		if (spa->spa_load_max_txg < safe_rewind_txg)
			spa->spa_extreme_rewind = B_TRUE;
		rewind_error = spa_load_retry(spa, state, mosconfig);
	}

	spa->spa_extreme_rewind = B_FALSE;
	spa->spa_load_max_txg = UINT64_MAX;

	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
		spa_config_set(spa, config);

	if (state == SPA_LOAD_RECOVER) {
		ASSERT3P(loadinfo, ==, NULL);
		return (rewind_error);
	} else {
		/* Store the rewind info as part of the initial load info */
		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
		    spa->spa_load_info);

		/* Restore the initial load info */
		fnvlist_free(spa->spa_load_info);
		spa->spa_load_info = loadinfo;

		return (load_error);
	}
}
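
/*
 * Editor's worked example (not in the original source): with
 * TXG_DEFER_SIZE == 2 and a failing pool whose newest uberblock is at
 * txg 1000, safe_rewind_txg is 998.  A recovery import first retries
 * with spa_load_max_txg clamped to 999, then 998, each pass selecting
 * the best uberblock at or below that limit.  Only when the caller
 * requested ZPOOL_EXTREME_REWIND may the loop continue below 998 (the
 * deferred-free window); spa_extreme_rewind is then flagged so that
 * spa_load_impl() starts spa_load_verify()'s traversal from the
 * beginning of the pool instead of the recent-txg window.
 */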

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time we open the pool, without having to keep around the spa_t
 * in some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
    nvlist_t **config)
{
	spa_t *spa;
	spa_load_state_t state = SPA_LOAD_OPEN;
	int error;
	int locked = B_FALSE;
	int firstopen = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENOENT));
	}

	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
		zpool_rewind_policy_t policy;

		firstopen = B_TRUE;

		zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
		    &policy);
		if (policy.zrp_request & ZPOOL_DO_REWIND)
			state = SPA_LOAD_RECOVER;

		spa_activate(spa, spa_mode_global);

		if (state != SPA_LOAD_RECOVER)
			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;

		error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
		    policy.zrp_request);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means one of the vdevs indicates that the
			 * pool has been exported or destroyed.  If this is the
			 * case, the config cache is out of sync and we should
			 * remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(ENOENT));
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_config) {
				VERIFY(nvlist_dup(spa->spa_config, config,
				    KM_PUSHPAGE) == 0);
				VERIFY(nvlist_add_nvlist(*config,
				    ZPOOL_CONFIG_LOAD_INFO,
				    spa->spa_load_info) == 0);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = error;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		}
	}

	spa_open_ref(spa, tag);

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	/*
	 * If we've recovered the pool, pass back any information we
	 * gathered while doing the load.
	 */
	if (state == SPA_LOAD_RECOVER) {
		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
		    spa->spa_load_info) == 0);
	}

	if (locked) {
		spa->spa_last_open_failed = 0;
		spa->spa_last_ubsync_txg = 0;
		spa->spa_load_txg = 0;
		mutex_exit(&spa_namespace_lock);
	}

#ifdef _KERNEL
	if (firstopen)
		zvol_create_minors(spa->spa_name);
#endif

	*spapp = spa;

	return (0);
}
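
/*
 * Editor's sketch of the consumer pattern (illustrative only; FTAG is
 * the usual per-function tag macro):
 *
 *	spa_t *spa;
 *	int err = spa_open("tank", &spa, FTAG);
 *	if (err == 0) {
 *		... use spa ...
 *		spa_close(spa, FTAG);
 *	}
 *
 * spa_open_ref()/spa_close() keep a tagged reference on the spa_t so
 * the pool cannot be exported or destroyed out from under the caller.
 */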

int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
    nvlist_t **config)
{
	return (spa_open_common(name, spapp, tag, policy, config));
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spare device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_l2cache.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);
			vdev_get_stats(vd, vs);
		}
	}
}

static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
	nvlist_t *features;
	zap_cursor_t zc;
	zap_attribute_t za;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
	VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	if (spa->spa_feat_for_read_obj != 0) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_feat_for_read_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
			    za.za_num_integers == 1);
			VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
			    za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}

	if (spa->spa_feat_for_write_obj != 0) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_feat_for_write_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
			    za.za_num_integers == 1);
			VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
			    za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}

	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	    features) == 0);
	nvlist_free(features);
}
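
/*
 * Editor's note: the zap_cursor_init() / zap_cursor_retrieve() /
 * zap_cursor_advance() loop above is the canonical way to walk a ZAP
 * object.  Each feature ZAP maps a feature name to a single uint64
 * reference count, which is why the loop asserts
 * za_integer_length == sizeof (uint64_t) and za_num_integers == 1
 * before copying za_first_integer into the stats nvlist.
 */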

int
spa_get_stats(const char *name, nvlist_t **config,
    char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, NULL, config);

	if (spa != NULL) {
		/*
		 * This still leaves a window of inconsistency where the spares
		 * or l2cache devices could change and the config would be
		 * self-inconsistent.
		 */
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		if (*config != NULL) {
			uint64_t loadtimes[2];

			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
			VERIFY(nvlist_add_uint64_array(*config,
			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);

			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_ERRCOUNT,
			    spa_get_errlog_size(spa)) == 0);

			if (spa_suspended(spa))
				VERIFY(nvlist_add_uint64(*config,
				    ZPOOL_CONFIG_SUSPENDED,
				    spa->spa_failmode) == 0);

			spa_add_spares(spa, *config);
			spa_add_l2cache(spa, *config);
			spa_add_feature_stats(spa, *config);
		}
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL) {
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		spa_close(spa, FTAG);
	}

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (SET_ERROR(EINVAL));

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (SET_ERROR(ENOTSUP));

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context.  For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = SET_ERROR(ENOTBLK);
			vdev_free(vd);
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_PUSHPAGE);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_PUSHPAGE) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_PUSHPAGE) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_PUSHPAGE) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}
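
/*
 * Editor's note: nvlist_add_nvlist_array() copies its input, so the
 * temporary newdevs array above, built from nvlist_dup()'d elements of
 * both the old and the new lists, must be freed element by element
 * once it has been spliced into sav->sav_config, which is exactly what
 * the trailing loop and kmem_free() do.
 */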

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
		    pool != 0ULL && l2arc_vdev_present(vd))
			l2arc_remove_vdev(vd);
	}
}
3407
3408/*
3409 * Pool Creation
3410 */
3411int
3412spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
6f1ffb06 3413 nvlist_t *zplprops)
34dc7c2f
BB
3414{
3415 spa_t *spa;
3416 char *altroot = NULL;
3417 vdev_t *rvd;
3418 dsl_pool_t *dp;
3419 dmu_tx_t *tx;
9babb374 3420 int error = 0;
34dc7c2f
BB
3421 uint64_t txg = TXG_INITIAL;
3422 nvlist_t **spares, **l2cache;
3423 uint_t nspares, nl2cache;
428870ff 3424 uint64_t version, obj;
9ae529ec
CS
3425 boolean_t has_features;
3426 nvpair_t *elem;
d6320ddb 3427 int c;
34dc7c2f
BB
3428
3429 /*
3430 * If this pool already exists, return failure.
3431 */
3432 mutex_enter(&spa_namespace_lock);
3433 if (spa_lookup(pool) != NULL) {
3434 mutex_exit(&spa_namespace_lock);
2e528b49 3435 return (SET_ERROR(EEXIST));
34dc7c2f
BB
3436 }
3437
3438 /*
3439 * Allocate a new spa_t structure.
3440 */
3441 (void) nvlist_lookup_string(props,
3442 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
428870ff 3443 spa = spa_add(pool, NULL, altroot);
fb5f0bc8 3444 spa_activate(spa, spa_mode_global);
34dc7c2f 3445
34dc7c2f 3446 if (props && (error = spa_prop_validate(spa, props))) {
34dc7c2f
BB
3447 spa_deactivate(spa);
3448 spa_remove(spa);
b128c09f 3449 mutex_exit(&spa_namespace_lock);
34dc7c2f
BB
3450 return (error);
3451 }
3452
9ae529ec
CS
3453 has_features = B_FALSE;
3454 for (elem = nvlist_next_nvpair(props, NULL);
3455 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3456 if (zpool_prop_feature(nvpair_name(elem)))
3457 has_features = B_TRUE;
3458 }
3459
3460 if (has_features || nvlist_lookup_uint64(props,
3461 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
34dc7c2f 3462 version = SPA_VERSION;
9ae529ec
CS
3463 }
3464 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
428870ff
BB
3465
3466 spa->spa_first_txg = txg;
3467 spa->spa_uberblock.ub_txg = txg - 1;
34dc7c2f
BB
3468 spa->spa_uberblock.ub_version = version;
3469 spa->spa_ubsync = spa->spa_uberblock;
3470
9babb374
BB
3471 /*
3472 * Create "The Godfather" zio to hold all async IOs
3473 */
3474 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
3475 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
3476
34dc7c2f
BB
3477 /*
3478 * Create the root vdev.
3479 */
b128c09f 3480 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
3481
3482 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3483
3484 ASSERT(error != 0 || rvd != NULL);
3485 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3486
3487 if (error == 0 && !zfs_allocatable_devs(nvroot))
2e528b49 3488 error = SET_ERROR(EINVAL);
34dc7c2f
BB
3489
3490 if (error == 0 &&
3491 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3492 (error = spa_validate_aux(spa, nvroot, txg,
3493 VDEV_ALLOC_ADD)) == 0) {
d6320ddb 3494 for (c = 0; c < rvd->vdev_children; c++) {
9babb374
BB
3495 vdev_metaslab_set_size(rvd->vdev_child[c]);
3496 vdev_expand(rvd->vdev_child[c], txg);
3497 }
34dc7c2f
BB
3498 }
3499
b128c09f 3500 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3501
3502 if (error != 0) {
3503 spa_unload(spa);
3504 spa_deactivate(spa);
3505 spa_remove(spa);
3506 mutex_exit(&spa_namespace_lock);
3507 return (error);
3508 }
3509
3510 /*
3511 * Get the list of spares, if specified.
3512 */
3513 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3514 &spares, &nspares) == 0) {
3515 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
b8d06fca 3516 KM_PUSHPAGE) == 0);
34dc7c2f
BB
3517 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3518 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 3519 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3520 spa_load_spares(spa);
b128c09f 3521 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3522 spa->spa_spares.sav_sync = B_TRUE;
3523 }
3524
3525 /*
3526 * Get the list of level 2 cache devices, if specified.
3527 */
3528 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3529 &l2cache, &nl2cache) == 0) {
3530 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
b8d06fca 3531 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
3532 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3533 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 3534 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3535 spa_load_l2cache(spa);
b128c09f 3536 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3537 spa->spa_l2cache.sav_sync = B_TRUE;
3538 }
3539
9ae529ec 3540 spa->spa_is_initializing = B_TRUE;
b128c09f 3541 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
34dc7c2f 3542 spa->spa_meta_objset = dp->dp_meta_objset;
9ae529ec 3543 spa->spa_is_initializing = B_FALSE;
34dc7c2f 3544
428870ff
BB
3545 /*
3546 * Create DDTs (dedup tables).
3547 */
3548 ddt_create(spa);
3549
3550 spa_update_dspace(spa);
3551
34dc7c2f
BB
3552 tx = dmu_tx_create_assigned(dp, txg);
3553
3554 /*
3555 * Create the pool config object.
3556 */
3557 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
b128c09f 3558 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
34dc7c2f
BB
3559 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3560
3561 if (zap_add(spa->spa_meta_objset,
3562 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3563 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3564 cmn_err(CE_PANIC, "failed to add pool config");
3565 }
3566
9ae529ec
CS
3567 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3568 spa_feature_create_zap_objects(spa, tx);
3569
428870ff
BB
3570 if (zap_add(spa->spa_meta_objset,
3571 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3572 sizeof (uint64_t), 1, &version, tx) != 0) {
3573 cmn_err(CE_PANIC, "failed to add pool version");
3574 }
3575
34dc7c2f
BB
3576 /* Newly created pools with the right version are always deflated. */
3577 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3578 spa->spa_deflate = TRUE;
3579 if (zap_add(spa->spa_meta_objset,
3580 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3581 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3582 cmn_err(CE_PANIC, "failed to add deflate");
3583 }
3584 }
3585
3586 /*
428870ff 3587 * Create the deferred-free bpobj. Turn off compression
34dc7c2f
BB
3588 * because sync-to-convergence takes longer if the blocksize
3589 * keeps changing.
3590 */
428870ff
BB
3591 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3592 dmu_object_set_compress(spa->spa_meta_objset, obj,
34dc7c2f 3593 ZIO_COMPRESS_OFF, tx);
34dc7c2f 3594 if (zap_add(spa->spa_meta_objset,
428870ff
BB
3595 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3596 sizeof (uint64_t), 1, &obj, tx) != 0) {
3597 cmn_err(CE_PANIC, "failed to add bpobj");
34dc7c2f 3598 }
428870ff
BB
3599 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3600 spa->spa_meta_objset, obj));
34dc7c2f
BB
3601
3602 /*
3603 * Create the pool's history object.
3604 */
3605 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3606 spa_history_create_obj(spa, tx);
3607
3608 /*
3609 * Set pool properties.
3610 */
3611 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3612 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3613 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
9babb374 3614 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
428870ff 3615
d164b209
BB
3616 if (props != NULL) {
3617 spa_configfile_set(spa, props, B_FALSE);
13fe0198 3618 spa_sync_props(props, tx);
d164b209 3619 }
34dc7c2f
BB
3620
3621 dmu_tx_commit(tx);
3622
3623 spa->spa_sync_on = B_TRUE;
3624 txg_sync_start(spa->spa_dsl_pool);
3625
3626 /*
3627 * We explicitly wait for the first transaction to complete so that our
3628 * bean counters are appropriately updated.
3629 */
3630 txg_wait_synced(spa->spa_dsl_pool, txg);
3631
b128c09f 3632 spa_config_sync(spa, B_FALSE, B_TRUE);
34dc7c2f 3633
6f1ffb06 3634 spa_history_log_version(spa, "create");
34dc7c2f 3635
b128c09f
BB
3636 spa->spa_minref = refcount_count(&spa->spa_refcount);
3637
d164b209
BB
3638 mutex_exit(&spa_namespace_lock);
3639
34dc7c2f
BB
3640 return (0);
3641}
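/*
 * A minimal sketch of driving the create path above from a hypothetical
 * in-kernel caller, kept under #if 0 as illustration only. It assumes the
 * spa_create(pool, nvroot, props, zplprops) signature of this vintage; the
 * pool name and device path are placeholders.
 */
#if 0
static int
example_spa_create(void)
{
	nvlist_t *nvroot, *disk;
	int error;

	/* Describe a single leaf disk vdev. */
	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/example-disk") == 0);

	/* Wrap it in the root vdev that spa_config_parse() expects. */
	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	/* No pool properties and no ZPL properties for this example. */
	error = spa_create("example", nvroot, NULL, NULL);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (error);
}
#endif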
3642
9babb374 3643#ifdef _KERNEL
34dc7c2f 3644/*
9babb374
BB
3645 * Get the root pool information from the root disk, then import the root pool
3646 * during system boot.
34dc7c2f 3647 */
9babb374
BB
3648extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3649
3650static nvlist_t *
3651spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3652{
3653 nvlist_t *config;
3654 nvlist_t *nvtop, *nvroot;
3655 uint64_t pgid;
3656
3657 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3658 return (NULL);
3659
3660 /*
3661 * Add this top-level vdev to the child array.
3662 */
3663 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3664 &nvtop) == 0);
3665 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3666 &pgid) == 0);
3667 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3668
3669 /*
3670 * Put this pool's top-level vdevs into a root vdev.
3671 */
b8d06fca 3672 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
9babb374
BB
3673 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3674 VDEV_TYPE_ROOT) == 0);
3675 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3676 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3677 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3678 &nvtop, 1) == 0);
3679
3680 /*
3681 * Replace the existing vdev_tree with the new root vdev in
3682 * this pool's configuration (remove the old, add the new).
3683 */
3684 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3685 nvlist_free(nvroot);
3686 return (config);
3687}
3688
3689/*
3690 * Walk the vdev tree and see if we can find a device with "better"
3691 * configuration. A configuration is "better" if the label on that
3692 * device has a more recent txg.
3693 */
3694static void
3695spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3696{
d6320ddb
BB
3697 int c;
3698
3699 for (c = 0; c < vd->vdev_children; c++)
9babb374
BB
3700 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3701
3702 if (vd->vdev_ops->vdev_op_leaf) {
3703 nvlist_t *label;
3704 uint64_t label_txg;
3705
3706 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3707 &label) != 0)
3708 return;
3709
3710 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3711 &label_txg) == 0);
3712
3713 /*
3714 * Do we have a better boot device?
3715 */
3716 if (label_txg > *txg) {
3717 *txg = label_txg;
3718 *avd = vd;
3719 }
3720 nvlist_free(label);
3721 }
3722}
3723
3724/*
3725 * Import a root pool.
3726 *
3727 * For x86, devpath_list will consist of devid and/or physpath name of
3728 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3729 * The GRUB "findroot" command will return the vdev we should boot.
3730 *
3731 * For Sparc, devpath_list consists of the physpath name of the booting
3732 * device, whether the rootpool is a single-device pool or a mirrored pool.
3733 * e.g.
3734 * "/pci@1f,0/ide@d/disk@0,0:a"
3735 */
3736int
3737spa_import_rootpool(char *devpath, char *devid)
3738{
3739 spa_t *spa;
3740 vdev_t *rvd, *bvd, *avd = NULL;
3741 nvlist_t *config, *nvtop;
3742 uint64_t guid, txg;
3743 char *pname;
3744 int error;
3745
3746 /*
3747 * Read the label from the boot device and generate a configuration.
3748 */
428870ff
BB
3749 config = spa_generate_rootconf(devpath, devid, &guid);
3750#if defined(_OBP) && defined(_KERNEL)
3751 if (config == NULL) {
3752 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3753 /* iscsi boot */
3754 get_iscsi_bootpath_phy(devpath);
3755 config = spa_generate_rootconf(devpath, devid, &guid);
3756 }
3757 }
3758#endif
3759 if (config == NULL) {
9ae529ec 3760 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
9babb374 3761 devpath);
2e528b49 3762 return (SET_ERROR(EIO));
9babb374
BB
3763 }
3764
3765 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3766 &pname) == 0);
3767 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3768
3769 mutex_enter(&spa_namespace_lock);
3770 if ((spa = spa_lookup(pname)) != NULL) {
3771 /*
3772 * Remove the existing root pool from the namespace so that we
3773 * can replace it with the correct config we just read in.
3774 */
3775 spa_remove(spa);
3776 }
3777
428870ff 3778 spa = spa_add(pname, config, NULL);
9babb374 3779 spa->spa_is_root = B_TRUE;
572e2857 3780 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
9babb374
BB
3781
3782 /*
3783 * Build up a vdev tree based on the boot device's label config.
3784 */
3785 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3786 &nvtop) == 0);
3787 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3788 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3789 VDEV_ALLOC_ROOTPOOL);
3790 spa_config_exit(spa, SCL_ALL, FTAG);
3791 if (error) {
3792 mutex_exit(&spa_namespace_lock);
3793 nvlist_free(config);
3794 cmn_err(CE_NOTE, "Cannot parse the config for pool '%s'",
3795 pname);
3796 return (error);
3797 }
3798
3799 /*
3800 * Get the boot vdev.
3801 */
3802 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3803 cmn_err(CE_NOTE, "Cannot find the boot vdev for guid %llu",
3804 (u_longlong_t)guid);
2e528b49 3805 error = SET_ERROR(ENOENT);
9babb374
BB
3806 goto out;
3807 }
3808
3809 /*
3810 * Determine if there is a better boot device.
3811 */
3812 avd = bvd;
3813 spa_alt_rootvdev(rvd, &avd, &txg);
3814 if (avd != bvd) {
3815 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3816 "try booting from '%s'", avd->vdev_path);
2e528b49 3817 error = SET_ERROR(EINVAL);
9babb374
BB
3818 goto out;
3819 }
3820
3821 /*
3822 * If the boot device is part of a spare vdev then ensure that
3823 * we're booting off the active spare.
3824 */
3825 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3826 !bvd->vdev_isspare) {
3827 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3828 "try booting from '%s'",
572e2857
BB
3829 bvd->vdev_parent->
3830 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
2e528b49 3831 error = SET_ERROR(EINVAL);
9babb374
BB
3832 goto out;
3833 }
3834
9babb374
BB
3835 error = 0;
3836out:
3837 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3838 vdev_free(rvd);
3839 spa_config_exit(spa, SCL_ALL, FTAG);
3840 mutex_exit(&spa_namespace_lock);
3841
3842 nvlist_free(config);
3843 return (error);
3844}
3845
3846#endif
3847
9babb374
BB
3848/*
3849 * Import a non-root pool into the system.
3850 */
3851int
13fe0198 3852spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
34dc7c2f
BB
3853{
3854 spa_t *spa;
3855 char *altroot = NULL;
428870ff
BB
3856 spa_load_state_t state = SPA_LOAD_IMPORT;
3857 zpool_rewind_policy_t policy;
572e2857
BB
3858 uint64_t mode = spa_mode_global;
3859 uint64_t readonly = B_FALSE;
9babb374 3860 int error;
34dc7c2f
BB
3861 nvlist_t *nvroot;
3862 nvlist_t **spares, **l2cache;
3863 uint_t nspares, nl2cache;
34dc7c2f
BB
3864
3865 /*
3866 * If a pool with this name exists, return failure.
3867 */
3868 mutex_enter(&spa_namespace_lock);
428870ff 3869 if (spa_lookup(pool) != NULL) {
9babb374 3870 mutex_exit(&spa_namespace_lock);
2e528b49 3871 return (SET_ERROR(EEXIST));
34dc7c2f
BB
3872 }
3873
3874 /*
3875 * Create and initialize the spa structure.
3876 */
3877 (void) nvlist_lookup_string(props,
3878 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
572e2857
BB
3879 (void) nvlist_lookup_uint64(props,
3880 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3881 if (readonly)
3882 mode = FREAD;
428870ff 3883 spa = spa_add(pool, config, altroot);
572e2857
BB
3884 spa->spa_import_flags = flags;
3885
3886 /*
3887 * Verbatim import - Take a pool and insert it into the namespace
3888 * as if it had been loaded at boot.
3889 */
3890 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3891 if (props != NULL)
3892 spa_configfile_set(spa, props, B_FALSE);
3893
3894 spa_config_sync(spa, B_FALSE, B_TRUE);
3895
3896 mutex_exit(&spa_namespace_lock);
6f1ffb06 3897 spa_history_log_version(spa, "import");
572e2857
BB
3898
3899 return (0);
3900 }
3901
3902 spa_activate(spa, mode);
34dc7c2f 3903
9babb374
BB
3904 /*
3905 * Don't start async tasks until we know everything is healthy.
3906 */
3907 spa_async_suspend(spa);
b128c09f 3908
572e2857
BB
3909 zpool_get_rewind_policy(config, &policy);
3910 if (policy.zrp_request & ZPOOL_DO_REWIND)
3911 state = SPA_LOAD_RECOVER;
3912
34dc7c2f 3913 /*
9babb374
BB
3914 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
3915 * because the user-supplied config is actually the one to trust when
b128c09f 3916 * doing an import.
34dc7c2f 3917 */
428870ff
BB
3918 if (state != SPA_LOAD_RECOVER)
3919 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
572e2857 3920
428870ff
BB
3921 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3922 policy.zrp_request);
3923
3924 /*
572e2857
BB
3925 * Propagate anything learned while loading the pool and pass it
3926 * back to caller (i.e. rewind info, missing devices, etc).
428870ff 3927 */
572e2857
BB
3928 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3929 spa->spa_load_info) == 0);
34dc7c2f 3930
b128c09f 3931 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3932 /*
9babb374
BB
3933 * Toss any existing sparelist, as it doesn't have any validity
3934 * anymore, and conflicts with spa_has_spare().
34dc7c2f 3935 */
9babb374 3936 if (spa->spa_spares.sav_config) {
34dc7c2f
BB
3937 nvlist_free(spa->spa_spares.sav_config);
3938 spa->spa_spares.sav_config = NULL;
3939 spa_load_spares(spa);
3940 }
9babb374 3941 if (spa->spa_l2cache.sav_config) {
34dc7c2f
BB
3942 nvlist_free(spa->spa_l2cache.sav_config);
3943 spa->spa_l2cache.sav_config = NULL;
3944 spa_load_l2cache(spa);
3945 }
3946
3947 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3948 &nvroot) == 0);
3949 if (error == 0)
9babb374
BB
3950 error = spa_validate_aux(spa, nvroot, -1ULL,
3951 VDEV_ALLOC_SPARE);
34dc7c2f
BB
3952 if (error == 0)
3953 error = spa_validate_aux(spa, nvroot, -1ULL,
3954 VDEV_ALLOC_L2CACHE);
b128c09f 3955 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 3956
d164b209
BB
3957 if (props != NULL)
3958 spa_configfile_set(spa, props, B_FALSE);
3959
fb5f0bc8
BB
3960 if (error != 0 || (props && spa_writeable(spa) &&
3961 (error = spa_prop_set(spa, props)))) {
9babb374
BB
3962 spa_unload(spa);
3963 spa_deactivate(spa);
3964 spa_remove(spa);
34dc7c2f
BB
3965 mutex_exit(&spa_namespace_lock);
3966 return (error);
3967 }
3968
572e2857
BB
3969 spa_async_resume(spa);
3970
34dc7c2f
BB
3971 /*
3972 * Override any spares and level 2 cache devices as specified by
3973 * the user, as these may have correct device names/devids, etc.
3974 */
3975 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3976 &spares, &nspares) == 0) {
3977 if (spa->spa_spares.sav_config)
3978 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
3979 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
3980 else
3981 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
b8d06fca 3982 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
3983 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3984 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 3985 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3986 spa_load_spares(spa);
b128c09f 3987 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3988 spa->spa_spares.sav_sync = B_TRUE;
3989 }
3990 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3991 &l2cache, &nl2cache) == 0) {
3992 if (spa->spa_l2cache.sav_config)
3993 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
3994 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
3995 else
3996 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
b8d06fca 3997 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
3998 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3999 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 4000 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4001 spa_load_l2cache(spa);
b128c09f 4002 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
4003 spa->spa_l2cache.sav_sync = B_TRUE;
4004 }
4005
428870ff
BB
4006 /*
4007 * Check for any removed devices.
4008 */
4009 if (spa->spa_autoreplace) {
4010 spa_aux_check_removed(&spa->spa_spares);
4011 spa_aux_check_removed(&spa->spa_l2cache);
4012 }
4013
fb5f0bc8 4014 if (spa_writeable(spa)) {
b128c09f
BB
4015 /*
4016 * Update the config cache to include the newly-imported pool.
4017 */
45d1cae3 4018 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
b128c09f 4019 }
34dc7c2f 4020
34dc7c2f 4021 /*
9babb374
BB
4022 * It's possible that the pool was expanded while it was exported.
4023 * We kick off an async task to handle this for us.
34dc7c2f 4024 */
9babb374 4025 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
b128c09f 4026
9babb374 4027 mutex_exit(&spa_namespace_lock);
6f1ffb06 4028 spa_history_log_version(spa, "import");
b128c09f 4029
526af785
PJD
4030#ifdef _KERNEL
4031 zvol_create_minors(pool);
4032#endif
4033
b128c09f
BB
4034 return (0);
4035}
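/*
 * A sketch of a verbatim import, kept under #if 0 as illustration only.
 * ZFS_IMPORT_VERBATIM short-circuits spa_load() and trusts the supplied
 * config, exactly as the boot-time root-pool import above does. The caller
 * and pool name are hypothetical.
 */
#if 0
static int
example_spa_import_verbatim(char *pool, nvlist_t *config)
{
	return (spa_import(pool, config, NULL, ZFS_IMPORT_VERBATIM));
}
#endif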
4036
34dc7c2f
BB
4037nvlist_t *
4038spa_tryimport(nvlist_t *tryconfig)
4039{
4040 nvlist_t *config = NULL;
4041 char *poolname;
4042 spa_t *spa;
4043 uint64_t state;
d164b209 4044 int error;
34dc7c2f
BB
4045
4046 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4047 return (NULL);
4048
4049 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4050 return (NULL);
4051
4052 /*
4053 * Create and initialize the spa structure.
4054 */
4055 mutex_enter(&spa_namespace_lock);
428870ff 4056 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
fb5f0bc8 4057 spa_activate(spa, FREAD);
34dc7c2f
BB
4058
4059 /*
4060 * Pass off the heavy lifting to spa_load().
4061 * Pass TRUE for mosconfig because the user-supplied config
4062 * is actually the one to trust when doing an import.
4063 */
428870ff 4064 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
34dc7c2f
BB
4065
4066 /*
4067 * If 'tryconfig' was at least parsable, return the current config.
4068 */
4069 if (spa->spa_root_vdev != NULL) {
34dc7c2f 4070 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
34dc7c2f
BB
4071 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4072 poolname) == 0);
4073 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4074 state) == 0);
4075 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4076 spa->spa_uberblock.ub_timestamp) == 0);
9ae529ec
CS
4077 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4078 spa->spa_load_info) == 0);
34dc7c2f
BB
4079
4080 /*
4081 * If the bootfs property exists on this pool then we
4082 * copy it out so that external consumers can tell which
4083 * pools are bootable.
4084 */
d164b209 4085 if ((!error || error == EEXIST) && spa->spa_bootfs) {
b8d06fca 4086 char *tmpname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
34dc7c2f
BB
4087
4088 /*
4089 * We have to play games with the name since the
4090 * pool was opened as TRYIMPORT_NAME.
4091 */
b128c09f 4092 if (dsl_dsobj_to_dsname(spa_name(spa),
34dc7c2f
BB
4093 spa->spa_bootfs, tmpname) == 0) {
4094 char *cp;
b8d06fca 4095 char *dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
34dc7c2f
BB
4096
4097 cp = strchr(tmpname, '/');
4098 if (cp == NULL) {
4099 (void) strlcpy(dsname, tmpname,
4100 MAXPATHLEN);
4101 } else {
4102 (void) snprintf(dsname, MAXPATHLEN,
4103 "%s/%s", poolname, ++cp);
4104 }
4105 VERIFY(nvlist_add_string(config,
4106 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4107 kmem_free(dsname, MAXPATHLEN);
4108 }
4109 kmem_free(tmpname, MAXPATHLEN);
4110 }
4111
4112 /*
4113 * Add the list of hot spares and level 2 cache devices.
4114 */
9babb374 4115 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f
BB
4116 spa_add_spares(spa, config);
4117 spa_add_l2cache(spa, config);
9babb374 4118 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f
BB
4119 }
4120
4121 spa_unload(spa);
4122 spa_deactivate(spa);
4123 spa_remove(spa);
4124 mutex_exit(&spa_namespace_lock);
4125
4126 return (config);
4127}
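/*
 * A sketch of probing a config with spa_tryimport(), kept under #if 0 as
 * illustration only. The returned nvlist is owned by the caller.
 */
#if 0
static void
example_spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config;
	uint64_t state;

	if ((config = spa_tryimport(tryconfig)) == NULL)
		return;		/* tryconfig wasn't even parsable */

	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0)
		cmn_err(CE_NOTE, "pool state %llu", (u_longlong_t)state);

	nvlist_free(config);
}
#endif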
4128
4129/*
4130 * Pool export/destroy
4131 *
4132 * The act of destroying or exporting a pool is very simple. We make sure there
4133 * is no more pending I/O and any references to the pool are gone. Then, we
4134 * update the pool state and sync all the labels to disk, removing the
fb5f0bc8
BB
4135 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4136 * we don't sync the labels or remove the configuration cache.
34dc7c2f
BB
4137 */
4138static int
b128c09f 4139spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
fb5f0bc8 4140 boolean_t force, boolean_t hardforce)
34dc7c2f
BB
4141{
4142 spa_t *spa;
4143
4144 if (oldconfig)
4145 *oldconfig = NULL;
4146
fb5f0bc8 4147 if (!(spa_mode_global & FWRITE))
2e528b49 4148 return (SET_ERROR(EROFS));
34dc7c2f
BB
4149
4150 mutex_enter(&spa_namespace_lock);
4151 if ((spa = spa_lookup(pool)) == NULL) {
4152 mutex_exit(&spa_namespace_lock);
2e528b49 4153 return (SET_ERROR(ENOENT));
34dc7c2f
BB
4154 }
4155
4156 /*
4157 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4158 * reacquire the namespace lock, and see if we can export.
4159 */
4160 spa_open_ref(spa, FTAG);
4161 mutex_exit(&spa_namespace_lock);
4162 spa_async_suspend(spa);
4163 mutex_enter(&spa_namespace_lock);
4164 spa_close(spa, FTAG);
4165
4166 /*
4167 * The pool will be in core if it's openable,
4168 * in which case we can modify its state.
4169 */
4170 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4171 /*
4172 * Objsets may be open only because they're dirty, so we
4173 * have to force it to sync before checking spa_refcnt.
4174 */
34dc7c2f
BB
4175 txg_wait_synced(spa->spa_dsl_pool, 0);
4176
4177 /*
4178 * A pool cannot be exported or destroyed if there are active
4179 * references. If we are resetting a pool, allow references by
4180 * fault injection handlers.
4181 */
4182 if (!spa_refcount_zero(spa) ||
4183 (spa->spa_inject_ref != 0 &&
4184 new_state != POOL_STATE_UNINITIALIZED)) {
34dc7c2f
BB
4185 spa_async_resume(spa);
4186 mutex_exit(&spa_namespace_lock);
2e528b49 4187 return (SET_ERROR(EBUSY));
34dc7c2f
BB
4188 }
4189
b128c09f
BB
4190 /*
4191 * A pool cannot be exported if it has an active shared spare.
4192 * This is to prevent other pools stealing the active spare
4193 * from an exported pool. At the user's own discretion, such a pool can
4194 * be forcibly exported.
4195 */
4196 if (!force && new_state == POOL_STATE_EXPORTED &&
4197 spa_has_active_shared_spare(spa)) {
4198 spa_async_resume(spa);
4199 mutex_exit(&spa_namespace_lock);
2e528b49 4200 return (SET_ERROR(EXDEV));
b128c09f 4201 }
34dc7c2f
BB
4202
4203 /*
4204 * We want this to be reflected on every label,
4205 * so mark them all dirty. spa_unload() will do the
4206 * final sync that pushes these changes out.
4207 */
fb5f0bc8 4208 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
b128c09f 4209 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4210 spa->spa_state = new_state;
428870ff
BB
4211 spa->spa_final_txg = spa_last_synced_txg(spa) +
4212 TXG_DEFER_SIZE + 1;
34dc7c2f 4213 vdev_config_dirty(spa->spa_root_vdev);
b128c09f 4214 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
4215 }
4216 }
4217
26685276 4218 spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_DESTROY);
34dc7c2f
BB
4219
4220 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4221 spa_unload(spa);
4222 spa_deactivate(spa);
4223 }
4224
4225 if (oldconfig && spa->spa_config)
4226 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4227
4228 if (new_state != POOL_STATE_UNINITIALIZED) {
fb5f0bc8
BB
4229 if (!hardforce)
4230 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 4231 spa_remove(spa);
34dc7c2f
BB
4232 }
4233 mutex_exit(&spa_namespace_lock);
4234
4235 return (0);
4236}
4237
4238/*
4239 * Destroy a storage pool.
4240 */
4241int
4242spa_destroy(char *pool)
4243{
fb5f0bc8
BB
4244 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4245 B_FALSE, B_FALSE));
34dc7c2f
BB
4246}
4247
4248/*
4249 * Export a storage pool.
4250 */
4251int
fb5f0bc8
BB
4252spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4253 boolean_t hardforce)
34dc7c2f 4254{
fb5f0bc8
BB
4255 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4256 force, hardforce));
34dc7c2f
BB
4257}
4258
4259/*
4260 * Similar to spa_export(), this unloads the spa_t without actually removing it
4261 * from the namespace in any way.
4262 */
4263int
4264spa_reset(char *pool)
4265{
b128c09f 4266 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
fb5f0bc8 4267 B_FALSE, B_FALSE));
34dc7c2f
BB
4268}
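/*
 * A sketch of the export escalation a caller might implement, kept under
 * #if 0 as illustration only: retry with 'force' only when the first
 * attempt fails with EXDEV (an active shared spare, per the check above).
 */
#if 0
static int
example_spa_export(char *pool)
{
	nvlist_t *oldconfig = NULL;
	int error;

	error = spa_export(pool, &oldconfig, B_FALSE, B_FALSE);
	if (error == EXDEV)
		error = spa_export(pool, &oldconfig, B_TRUE, B_FALSE);

	if (oldconfig != NULL)
		nvlist_free(oldconfig);
	return (error);
}
#endif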
4269
34dc7c2f
BB
4270/*
4271 * ==========================================================================
4272 * Device manipulation
4273 * ==========================================================================
4274 */
4275
4276/*
4277 * Add a device to a storage pool.
4278 */
4279int
4280spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4281{
428870ff 4282 uint64_t txg, id;
fb5f0bc8 4283 int error;
34dc7c2f
BB
4284 vdev_t *rvd = spa->spa_root_vdev;
4285 vdev_t *vd, *tvd;
4286 nvlist_t **spares, **l2cache;
4287 uint_t nspares, nl2cache;
d6320ddb 4288 int c;
34dc7c2f 4289
572e2857
BB
4290 ASSERT(spa_writeable(spa));
4291
34dc7c2f
BB
4292 txg = spa_vdev_enter(spa);
4293
4294 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4295 VDEV_ALLOC_ADD)) != 0)
4296 return (spa_vdev_exit(spa, NULL, txg, error));
4297
b128c09f 4298 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
34dc7c2f
BB
4299
4300 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4301 &nspares) != 0)
4302 nspares = 0;
4303
4304 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4305 &nl2cache) != 0)
4306 nl2cache = 0;
4307
b128c09f 4308 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
34dc7c2f 4309 return (spa_vdev_exit(spa, vd, txg, EINVAL));
34dc7c2f 4310
b128c09f
BB
4311 if (vd->vdev_children != 0 &&
4312 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4313 return (spa_vdev_exit(spa, vd, txg, error));
34dc7c2f
BB
4314
4315 /*
4316 * We must validate the spares and l2cache devices after checking the
4317 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4318 */
b128c09f 4319 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
34dc7c2f 4320 return (spa_vdev_exit(spa, vd, txg, error));
34dc7c2f
BB
4321
4322 /*
4323 * Transfer each new top-level vdev from vd to rvd.
4324 */
d6320ddb 4325 for (c = 0; c < vd->vdev_children; c++) {
428870ff
BB
4326
4327 /*
4328 * Set the vdev id to the first hole, if one exists.
4329 */
4330 for (id = 0; id < rvd->vdev_children; id++) {
4331 if (rvd->vdev_child[id]->vdev_ishole) {
4332 vdev_free(rvd->vdev_child[id]);
4333 break;
4334 }
4335 }
34dc7c2f
BB
4336 tvd = vd->vdev_child[c];
4337 vdev_remove_child(vd, tvd);
428870ff 4338 tvd->vdev_id = id;
34dc7c2f
BB
4339 vdev_add_child(rvd, tvd);
4340 vdev_config_dirty(tvd);
4341 }
4342
4343 if (nspares != 0) {
4344 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4345 ZPOOL_CONFIG_SPARES);
4346 spa_load_spares(spa);
4347 spa->spa_spares.sav_sync = B_TRUE;
4348 }
4349
4350 if (nl2cache != 0) {
4351 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4352 ZPOOL_CONFIG_L2CACHE);
4353 spa_load_l2cache(spa);
4354 spa->spa_l2cache.sav_sync = B_TRUE;
4355 }
4356
4357 /*
4358 * We have to be careful when adding new vdevs to an existing pool.
4359 * If other threads start allocating from these vdevs before we
4360 * sync the config cache, and we lose power, then upon reboot we may
4361 * fail to open the pool because there are DVAs that the config cache
4362 * can't translate. Therefore, we first add the vdevs without
4363 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4364 * and then let spa_config_update() initialize the new metaslabs.
4365 *
4366 * spa_load() checks for added-but-not-initialized vdevs, so that
4367 * if we lose power at any point in this sequence, the remaining
4368 * steps will be completed the next time we load the pool.
4369 */
4370 (void) spa_vdev_exit(spa, vd, txg, 0);
4371
4372 mutex_enter(&spa_namespace_lock);
4373 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4374 mutex_exit(&spa_namespace_lock);
4375
4376 return (0);
4377}
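/*
 * A sketch of adding a cache device with spa_vdev_add(), kept under #if 0
 * as illustration only. An nvroot carrying only a ZPOOL_CONFIG_L2CACHE
 * array (no children) takes the nl2cache path above. The device path is a
 * placeholder.
 */
#if 0
static int
example_spa_vdev_add_l2cache(spa_t *spa)
{
	nvlist_t *nvroot, *cache;
	int error;

	VERIFY(nvlist_alloc(&cache, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_string(cache, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(cache, ZPOOL_CONFIG_PATH,
	    "/dev/example-cache") == 0);

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &cache, 1) == 0);

	error = spa_vdev_add(spa, nvroot);

	nvlist_free(cache);
	nvlist_free(nvroot);
	return (error);
}
#endif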
4378
4379/*
4380 * Attach a device to a mirror. The arguments are the path to any device
4381 * in the mirror, and the nvroot for the new device. If the path specifies
4382 * a device that is not mirrored, we automatically insert the mirror vdev.
4383 *
4384 * If 'replacing' is specified, the new device is intended to replace the
4385 * existing device; in this case the two devices are made into their own
4386 * mirror using the 'replacing' vdev, which is functionally identical to
4387 * the mirror vdev (it actually reuses all the same ops) but has a few
4388 * extra rules: you can't attach to it after it's been created, and upon
4389 * completion of resilvering, the first disk (the one being replaced)
4390 * is automatically detached.
4391 */
4392int
4393spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4394{
428870ff 4395 uint64_t txg, dtl_max_txg;
34dc7c2f
BB
4396 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4397 vdev_ops_t *pvops;
b128c09f
BB
4398 char *oldvdpath, *newvdpath;
4399 int newvd_isspare;
4400 int error;
2e528b49 4401 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
34dc7c2f 4402
572e2857
BB
4403 ASSERT(spa_writeable(spa));
4404
34dc7c2f
BB
4405 txg = spa_vdev_enter(spa);
4406
b128c09f 4407 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
4408
4409 if (oldvd == NULL)
4410 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4411
4412 if (!oldvd->vdev_ops->vdev_op_leaf)
4413 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4414
4415 pvd = oldvd->vdev_parent;
4416
4417 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
5ffb9d1d 4418 VDEV_ALLOC_ATTACH)) != 0)
34dc7c2f
BB
4419 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4420
4421 if (newrootvd->vdev_children != 1)
4422 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4423
4424 newvd = newrootvd->vdev_child[0];
4425
4426 if (!newvd->vdev_ops->vdev_op_leaf)
4427 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4428
4429 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4430 return (spa_vdev_exit(spa, newrootvd, txg, error));
4431
4432 /*
4433 * Spares can't replace logs
4434 */
b128c09f 4435 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
34dc7c2f
BB
4436 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4437
4438 if (!replacing) {
4439 /*
4440 * For attach, the only allowable parent is a mirror or the root
4441 * vdev.
4442 */
4443 if (pvd->vdev_ops != &vdev_mirror_ops &&
4444 pvd->vdev_ops != &vdev_root_ops)
4445 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4446
4447 pvops = &vdev_mirror_ops;
4448 } else {
4449 /*
4450 * Active hot spares can only be replaced by inactive hot
4451 * spares.
4452 */
4453 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857 4454 oldvd->vdev_isspare &&
34dc7c2f
BB
4455 !spa_has_spare(spa, newvd->vdev_guid))
4456 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4457
4458 /*
4459 * If the source is a hot spare, and the parent isn't already a
4460 * spare, then we want to create a new hot spare. Otherwise, we
4461 * want to create a replacing vdev. The user is not allowed to
4462 * attach to a spared vdev child unless the 'isspare' state is
4463 * the same (spare replaces spare, non-spare replaces
4464 * non-spare).
4465 */
572e2857
BB
4466 if (pvd->vdev_ops == &vdev_replacing_ops &&
4467 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
34dc7c2f 4468 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
572e2857
BB
4469 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4470 newvd->vdev_isspare != oldvd->vdev_isspare) {
34dc7c2f 4471 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
572e2857
BB
4472 }
4473
4474 if (newvd->vdev_isspare)
34dc7c2f
BB
4475 pvops = &vdev_spare_ops;
4476 else
4477 pvops = &vdev_replacing_ops;
4478 }
4479
4480 /*
9babb374 4481 * Make sure the new device is big enough.
34dc7c2f 4482 */
9babb374 4483 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
34dc7c2f
BB
4484 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4485
4486 /*
4487 * The new device cannot have a higher alignment requirement
4488 * than the top-level vdev.
4489 */
4490 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4491 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4492
4493 /*
4494 * If this is an in-place replacement, update oldvd's path and devid
4495 * to make it distinguishable from newvd, and unopenable from now on.
4496 */
4497 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4498 spa_strfree(oldvd->vdev_path);
4499 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
b8d06fca 4500 KM_PUSHPAGE);
34dc7c2f
BB
4501 (void) sprintf(oldvd->vdev_path, "%s/%s",
4502 newvd->vdev_path, "old");
4503 if (oldvd->vdev_devid != NULL) {
4504 spa_strfree(oldvd->vdev_devid);
4505 oldvd->vdev_devid = NULL;
4506 }
4507 }
4508
572e2857
BB
4509 /* mark the device being resilvered */
4510 newvd->vdev_resilvering = B_TRUE;
4511
34dc7c2f
BB
4512 /*
4513 * If the parent is not a mirror, or if we're replacing, insert the new
4514 * mirror/replacing/spare vdev above oldvd.
4515 */
4516 if (pvd->vdev_ops != pvops)
4517 pvd = vdev_add_parent(oldvd, pvops);
4518
4519 ASSERT(pvd->vdev_top->vdev_parent == rvd);
4520 ASSERT(pvd->vdev_ops == pvops);
4521 ASSERT(oldvd->vdev_parent == pvd);
4522
4523 /*
4524 * Extract the new device from its root and add it to pvd.
4525 */
4526 vdev_remove_child(newrootvd, newvd);
4527 newvd->vdev_id = pvd->vdev_children;
428870ff 4528 newvd->vdev_crtxg = oldvd->vdev_crtxg;
34dc7c2f
BB
4529 vdev_add_child(pvd, newvd);
4530
34dc7c2f
BB
4531 tvd = newvd->vdev_top;
4532 ASSERT(pvd->vdev_top == tvd);
4533 ASSERT(tvd->vdev_parent == rvd);
4534
4535 vdev_config_dirty(tvd);
4536
4537 /*
428870ff
BB
4538 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4539 * for any dmu_sync-ed blocks. It will propagate upward when
4540 * spa_vdev_exit() calls vdev_dtl_reassess().
34dc7c2f 4541 */
428870ff 4542 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
34dc7c2f 4543
428870ff
BB
4544 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4545 dtl_max_txg - TXG_INITIAL);
34dc7c2f 4546
9babb374 4547 if (newvd->vdev_isspare) {
34dc7c2f 4548 spa_spare_activate(newvd);
26685276 4549 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_DEVICE_SPARE);
9babb374
BB
4550 }
4551
b128c09f
BB
4552 oldvdpath = spa_strdup(oldvd->vdev_path);
4553 newvdpath = spa_strdup(newvd->vdev_path);
4554 newvd_isspare = newvd->vdev_isspare;
34dc7c2f
BB
4555
4556 /*
4557 * Mark newvd's DTL dirty in this txg.
4558 */
4559 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4560
428870ff
BB
4561 /*
4562 * Restart the resilver
4563 */
4564 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4565
4566 /*
4567 * Commit the config
4568 */
4569 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
34dc7c2f 4570
6f1ffb06 4571 spa_history_log_internal(spa, "vdev attach", NULL,
428870ff 4572 "%s vdev=%s %s vdev=%s",
45d1cae3
BB
4573 replacing && newvd_isspare ? "spare in" :
4574 replacing ? "replace" : "attach", newvdpath,
4575 replacing ? "for" : "to", oldvdpath);
b128c09f
BB
4576
4577 spa_strfree(oldvdpath);
4578 spa_strfree(newvdpath);
4579
572e2857 4580 if (spa->spa_bootfs)
26685276 4581 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_BOOTFS_VDEV_ATTACH);
572e2857 4582
34dc7c2f
BB
4583 return (0);
4584}
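/*
 * A sketch distinguishing the two uses of spa_vdev_attach(), kept under
 * #if 0 as illustration only. 'newroot' must hold exactly one new leaf;
 * 'guid' identifies the existing device.
 */
#if 0
static int
example_spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *newroot,
    boolean_t replace)
{
	/*
	 * replacing = B_TRUE interposes a 'replacing' vdev and detaches
	 * the old disk when the resilver completes; B_FALSE grows a
	 * mirror and keeps both disks.
	 */
	return (spa_vdev_attach(spa, guid, newroot, replace));
}
#endif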
4585
4586/*
4587 * Detach a device from a mirror or replacing vdev.
d3cc8b15 4588 *
34dc7c2f
BB
4589 * If 'replace_done' is specified, only detach if the parent
4590 * is a replacing vdev.
4591 */
4592int
fb5f0bc8 4593spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
34dc7c2f
BB
4594{
4595 uint64_t txg;
fb5f0bc8 4596 int error;
34dc7c2f
BB
4597 vdev_t *vd, *pvd, *cvd, *tvd;
4598 boolean_t unspare = B_FALSE;
d4ed6673 4599 uint64_t unspare_guid = 0;
428870ff 4600 char *vdpath;
d6320ddb 4601 int c, t;
2e528b49 4602 ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
572e2857
BB
4603 ASSERT(spa_writeable(spa));
4604
34dc7c2f
BB
4605 txg = spa_vdev_enter(spa);
4606
b128c09f 4607 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
4608
4609 if (vd == NULL)
4610 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4611
4612 if (!vd->vdev_ops->vdev_op_leaf)
4613 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4614
4615 pvd = vd->vdev_parent;
4616
fb5f0bc8
BB
4617 /*
4618 * If the parent/child relationship is not as expected, don't do it.
4619 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4620 * vdev that's replacing B with C. The user's intent in replacing
4621 * is to go from M(A,B) to M(A,C). If the user decides to cancel
4622 * the replace by detaching C, the expected behavior is to end up
4623 * M(A,B). But suppose that right after deciding to detach C,
4624 * the replacement of B completes. We would have M(A,C), and then
4625 * ask to detach C, which would leave us with just A -- not what
4626 * the user wanted. To prevent this, we make sure that the
4627 * parent/child relationship hasn't changed -- in this example,
4628 * that C's parent is still the replacing vdev R.
4629 */
4630 if (pvd->vdev_guid != pguid && pguid != 0)
4631 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4632
34dc7c2f 4633 /*
572e2857 4634 * Only 'replacing' or 'spare' vdevs can be replaced.
34dc7c2f 4635 */
572e2857
BB
4636 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4637 pvd->vdev_ops != &vdev_spare_ops)
4638 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
34dc7c2f
BB
4639
4640 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4641 spa_version(spa) >= SPA_VERSION_SPARES);
4642
4643 /*
4644 * Only mirror, replacing, and spare vdevs support detach.
4645 */
4646 if (pvd->vdev_ops != &vdev_replacing_ops &&
4647 pvd->vdev_ops != &vdev_mirror_ops &&
4648 pvd->vdev_ops != &vdev_spare_ops)
4649 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4650
4651 /*
fb5f0bc8
BB
4652 * If this device has the only valid copy of some data,
4653 * we cannot safely detach it.
34dc7c2f 4654 */
fb5f0bc8 4655 if (vdev_dtl_required(vd))
34dc7c2f
BB
4656 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4657
fb5f0bc8 4658 ASSERT(pvd->vdev_children >= 2);
34dc7c2f 4659
b128c09f
BB
4660 /*
4661 * If we are detaching the second disk from a replacing vdev, then
4662 * check to see if we changed the original vdev's path to have "/old"
4663 * at the end in spa_vdev_attach(). If so, undo that change now.
4664 */
572e2857
BB
4665 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4666 vd->vdev_path != NULL) {
4667 size_t len = strlen(vd->vdev_path);
4668
d6320ddb 4669 for (c = 0; c < pvd->vdev_children; c++) {
572e2857
BB
4670 cvd = pvd->vdev_child[c];
4671
4672 if (cvd == vd || cvd->vdev_path == NULL)
4673 continue;
4674
4675 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4676 strcmp(cvd->vdev_path + len, "/old") == 0) {
4677 spa_strfree(cvd->vdev_path);
4678 cvd->vdev_path = spa_strdup(vd->vdev_path);
4679 break;
4680 }
b128c09f
BB
4681 }
4682 }
4683
34dc7c2f
BB
4684 /*
4685 * If we are detaching the original disk from a spare, then it implies
4686 * that the spare should become a real disk, and be removed from the
4687 * active spare list for the pool.
4688 */
4689 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857
BB
4690 vd->vdev_id == 0 &&
4691 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
34dc7c2f
BB
4692 unspare = B_TRUE;
4693
4694 /*
4695 * Erase the disk labels so the disk can be used for other things.
4696 * This must be done after all other error cases are handled,
4697 * but before we disembowel vd (so we can still do I/O to it).
4698 * But if we can't do it, don't treat the error as fatal --
4699 * it may be that the unwritability of the disk is the reason
4700 * it's being detached!
4701 */
4702 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4703
4704 /*
4705 * Remove vd from its parent and compact the parent's children.
4706 */
4707 vdev_remove_child(pvd, vd);
4708 vdev_compact_children(pvd);
4709
4710 /*
4711 * Remember one of the remaining children so we can get tvd below.
4712 */
572e2857 4713 cvd = pvd->vdev_child[pvd->vdev_children - 1];
34dc7c2f
BB
4714
4715 /*
4716 * If we need to remove the remaining child from the list of hot spares,
fb5f0bc8
BB
4717 * do it now, marking the vdev as no longer a spare in the process.
4718 * We must do this before vdev_remove_parent(), because that can
4719 * change the GUID if it creates a new toplevel GUID. For a similar
4720 * reason, we must remove the spare now, in the same txg as the detach;
4721 * otherwise someone could attach a new sibling, change the GUID, and
4722 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
34dc7c2f
BB
4723 */
4724 if (unspare) {
4725 ASSERT(cvd->vdev_isspare);
4726 spa_spare_remove(cvd);
4727 unspare_guid = cvd->vdev_guid;
fb5f0bc8 4728 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
572e2857 4729 cvd->vdev_unspare = B_TRUE;
34dc7c2f
BB
4730 }
4731
428870ff
BB
4732 /*
4733 * If the parent mirror/replacing vdev only has one child,
4734 * the parent is no longer needed. Remove it from the tree.
4735 */
572e2857
BB
4736 if (pvd->vdev_children == 1) {
4737 if (pvd->vdev_ops == &vdev_spare_ops)
4738 cvd->vdev_unspare = B_FALSE;
428870ff 4739 vdev_remove_parent(cvd);
572e2857
BB
4740 cvd->vdev_resilvering = B_FALSE;
4741 }
4742
428870ff
BB
4743
4744 /*
4745 * We don't set tvd until now because the parent we just removed
4746 * may have been the previous top-level vdev.
4747 */
4748 tvd = cvd->vdev_top;
4749 ASSERT(tvd->vdev_parent == rvd);
4750
4751 /*
4752 * Reevaluate the parent vdev state.
4753 */
4754 vdev_propagate_state(cvd);
4755
4756 /*
4757 * If the 'autoexpand' property is set on the pool then automatically
4758 * try to expand the size of the pool. For example if the device we
4759 * just detached was smaller than the others, it may be possible to
4760 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4761 * first so that we can obtain the updated sizes of the leaf vdevs.
4762 */
4763 if (spa->spa_autoexpand) {
4764 vdev_reopen(tvd);
4765 vdev_expand(tvd, txg);
4766 }
4767
4768 vdev_config_dirty(tvd);
4769
4770 /*
4771 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
4772 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4773 * But first make sure we're not on any *other* txg's DTL list, to
4774 * prevent vd from being accessed after it's freed.
4775 */
4776 vdpath = spa_strdup(vd->vdev_path);
d6320ddb 4777 for (t = 0; t < TXG_SIZE; t++)
428870ff
BB
4778 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4779 vd->vdev_detached = B_TRUE;
4780 vdev_dirty(tvd, VDD_DTL, vd, txg);
4781
26685276 4782 spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_REMOVE);
428870ff 4783
572e2857
BB
4784 /* hang on to the spa before we release the lock */
4785 spa_open_ref(spa, FTAG);
4786
428870ff
BB
4787 error = spa_vdev_exit(spa, vd, txg, 0);
4788
6f1ffb06 4789 spa_history_log_internal(spa, "detach", NULL,
428870ff
BB
4790 "vdev=%s", vdpath);
4791 spa_strfree(vdpath);
4792
4793 /*
4794 * If this was the removal of the original device in a hot spare vdev,
4795 * then we want to go through and remove the device from the hot spare
4796 * list of every other pool.
4797 */
4798 if (unspare) {
572e2857
BB
4799 spa_t *altspa = NULL;
4800
428870ff 4801 mutex_enter(&spa_namespace_lock);
572e2857
BB
4802 while ((altspa = spa_next(altspa)) != NULL) {
4803 if (altspa->spa_state != POOL_STATE_ACTIVE ||
4804 altspa == spa)
428870ff 4805 continue;
572e2857
BB
4806
4807 spa_open_ref(altspa, FTAG);
428870ff 4808 mutex_exit(&spa_namespace_lock);
572e2857 4809 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
428870ff 4810 mutex_enter(&spa_namespace_lock);
572e2857 4811 spa_close(altspa, FTAG);
428870ff
BB
4812 }
4813 mutex_exit(&spa_namespace_lock);
572e2857
BB
4814
4815 /* search the rest of the vdevs for spares to remove */
4816 spa_vdev_resilver_done(spa);
428870ff
BB
4817 }
4818
572e2857
BB
4819 /* all done with the spa; OK to release */
4820 mutex_enter(&spa_namespace_lock);
4821 spa_close(spa, FTAG);
4822 mutex_exit(&spa_namespace_lock);
4823
428870ff
BB
4824 return (error);
4825}
4826
4827/*
4828 * Split a set of devices from their mirrors, and create a new pool from them.
4829 */
4830int
4831spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4832 nvlist_t *props, boolean_t exp)
4833{
4834 int error = 0;
4835 uint64_t txg, *glist;
4836 spa_t *newspa;
4837 uint_t c, children, lastlog;
4838 nvlist_t **child, *nvl, *tmp;
4839 dmu_tx_t *tx;
4840 char *altroot = NULL;
4841 vdev_t *rvd, **vml = NULL; /* vdev modify list */
4842 boolean_t activate_slog;
4843
572e2857 4844 ASSERT(spa_writeable(spa));
428870ff
BB
4845
4846 txg = spa_vdev_enter(spa);
4847
4848 /* clear the log and flush everything up to now */
4849 activate_slog = spa_passivate_log(spa);
4850 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4851 error = spa_offline_log(spa);
4852 txg = spa_vdev_config_enter(spa);
4853
4854 if (activate_slog)
4855 spa_activate_log(spa);
4856
4857 if (error != 0)
4858 return (spa_vdev_exit(spa, NULL, txg, error));
4859
4860 /* check new spa name before going any further */
4861 if (spa_lookup(newname) != NULL)
4862 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4863
4864 /*
4865 * scan through all the children to ensure they're all mirrors
4866 */
4867 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4868 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4869 &children) != 0)
4870 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4871
4872 /* first, check to ensure we've got the right child count */
4873 rvd = spa->spa_root_vdev;
4874 lastlog = 0;
4875 for (c = 0; c < rvd->vdev_children; c++) {
4876 vdev_t *vd = rvd->vdev_child[c];
4877
4878 /* don't count the holes & logs as children */
4879 if (vd->vdev_islog || vd->vdev_ishole) {
4880 if (lastlog == 0)
4881 lastlog = c;
4882 continue;
4883 }
4884
4885 lastlog = 0;
4886 }
4887 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4888 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4889
4890 /* next, ensure no spare or cache devices are part of the split */
4891 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4892 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4893 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4894
b8d06fca
RY
4895 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_PUSHPAGE);
4896 glist = kmem_zalloc(children * sizeof (uint64_t), KM_PUSHPAGE);
428870ff
BB
4897
4898 /* then, loop over each vdev and validate it */
4899 for (c = 0; c < children; c++) {
4900 uint64_t is_hole = 0;
4901
4902 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4903 &is_hole);
4904
4905 if (is_hole != 0) {
4906 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4907 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4908 continue;
4909 } else {
2e528b49 4910 error = SET_ERROR(EINVAL);
428870ff
BB
4911 break;
4912 }
4913 }
4914
4915 /* which disk is going to be split? */
4916 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4917 &glist[c]) != 0) {
2e528b49 4918 error = SET_ERROR(EINVAL);
428870ff
BB
4919 break;
4920 }
4921
4922 /* look it up in the spa */
4923 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4924 if (vml[c] == NULL) {
2e528b49 4925 error = SET_ERROR(ENODEV);
428870ff
BB
4926 break;
4927 }
4928
4929 /* make sure there's nothing stopping the split */
4930 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4931 vml[c]->vdev_islog ||
4932 vml[c]->vdev_ishole ||
4933 vml[c]->vdev_isspare ||
4934 vml[c]->vdev_isl2cache ||
4935 !vdev_writeable(vml[c]) ||
4936 vml[c]->vdev_children != 0 ||
4937 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4938 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
2e528b49 4939 error = SET_ERROR(EINVAL);
428870ff
BB
4940 break;
4941 }
4942
4943 if (vdev_dtl_required(vml[c])) {
2e528b49 4944 error = SET_ERROR(EBUSY);
428870ff
BB
4945 break;
4946 }
4947
4948 /* we need certain info from the top level */
4949 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4950 vml[c]->vdev_top->vdev_ms_array) == 0);
4951 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4952 vml[c]->vdev_top->vdev_ms_shift) == 0);
4953 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4954 vml[c]->vdev_top->vdev_asize) == 0);
4955 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4956 vml[c]->vdev_top->vdev_ashift) == 0);
4957 }
4958
4959 if (error != 0) {
4960 kmem_free(vml, children * sizeof (vdev_t *));
4961 kmem_free(glist, children * sizeof (uint64_t));
4962 return (spa_vdev_exit(spa, NULL, txg, error));
4963 }
4964
4965 /* stop writers from using the disks */
4966 for (c = 0; c < children; c++) {
4967 if (vml[c] != NULL)
4968 vml[c]->vdev_offline = B_TRUE;
4969 }
4970 vdev_reopen(spa->spa_root_vdev);
34dc7c2f
BB
4971
4972 /*
428870ff
BB
4973 * Temporarily record the splitting vdevs in the spa config. This
4974 * will disappear once the config is regenerated.
34dc7c2f 4975 */
b8d06fca 4976 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
428870ff
BB
4977 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
4978 glist, children) == 0);
4979 kmem_free(glist, children * sizeof (uint64_t));
34dc7c2f 4980
428870ff
BB
4981 mutex_enter(&spa->spa_props_lock);
4982 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
4983 nvl) == 0);
4984 mutex_exit(&spa->spa_props_lock);
4985 spa->spa_config_splitting = nvl;
4986 vdev_config_dirty(spa->spa_root_vdev);
4987
4988 /* configure and create the new pool */
4989 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
4990 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4991 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
4992 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
4993 spa_version(spa)) == 0);
4994 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
4995 spa->spa_config_txg) == 0);
4996 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4997 spa_generate_guid(NULL)) == 0);
4998 (void) nvlist_lookup_string(props,
4999 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
34dc7c2f 5000
428870ff
BB
5001 /* add the new pool to the namespace */
5002 newspa = spa_add(newname, config, altroot);
5003 newspa->spa_config_txg = spa->spa_config_txg;
5004 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5005
5006 /* release the spa config lock, retaining the namespace lock */
5007 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5008
5009 if (zio_injection_enabled)
5010 zio_handle_panic_injection(spa, FTAG, 1);
5011
5012 spa_activate(newspa, spa_mode_global);
5013 spa_async_suspend(newspa);
5014
5015 /* create the new pool from the disks of the original pool */
5016 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5017 if (error)
5018 goto out;
5019
5020 /* if that worked, generate a real config for the new pool */
5021 if (newspa->spa_root_vdev != NULL) {
5022 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
b8d06fca 5023 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
428870ff
BB
5024 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5025 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5026 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5027 B_TRUE));
9babb374 5028 }
34dc7c2f 5029
428870ff
BB
5030 /* set the props */
5031 if (props != NULL) {
5032 spa_configfile_set(newspa, props, B_FALSE);
5033 error = spa_prop_set(newspa, props);
5034 if (error)
5035 goto out;
5036 }
34dc7c2f 5037
428870ff
BB
5038 /* flush everything */
5039 txg = spa_vdev_config_enter(newspa);
5040 vdev_config_dirty(newspa->spa_root_vdev);
5041 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
34dc7c2f 5042
428870ff
BB
5043 if (zio_injection_enabled)
5044 zio_handle_panic_injection(spa, FTAG, 2);
34dc7c2f 5045
428870ff 5046 spa_async_resume(newspa);
34dc7c2f 5047
428870ff
BB
5048 /* finally, update the original pool's config */
5049 txg = spa_vdev_config_enter(spa);
5050 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5051 error = dmu_tx_assign(tx, TXG_WAIT);
5052 if (error != 0)
5053 dmu_tx_abort(tx);
5054 for (c = 0; c < children; c++) {
5055 if (vml[c] != NULL) {
5056 vdev_split(vml[c]);
5057 if (error == 0)
6f1ffb06
MA
5058 spa_history_log_internal(spa, "detach", tx,
5059 "vdev=%s", vml[c]->vdev_path);
428870ff 5060 vdev_free(vml[c]);
34dc7c2f 5061 }
34dc7c2f 5062 }
428870ff
BB
5063 vdev_config_dirty(spa->spa_root_vdev);
5064 spa->spa_config_splitting = NULL;
5065 nvlist_free(nvl);
5066 if (error == 0)
5067 dmu_tx_commit(tx);
5068 (void) spa_vdev_exit(spa, NULL, txg, 0);
5069
5070 if (zio_injection_enabled)
5071 zio_handle_panic_injection(spa, FTAG, 3);
5072
5073 /* split is complete; log a history record */
6f1ffb06
MA
5074 spa_history_log_internal(newspa, "split", NULL,
5075 "from pool %s", spa_name(spa));
428870ff
BB
5076
5077 kmem_free(vml, children * sizeof (vdev_t *));
5078
5079 /* if we're not going to mount the filesystems in userland, export */
5080 if (exp)
5081 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5082 B_FALSE, B_FALSE);
5083
5084 return (error);
5085
5086out:
5087 spa_unload(newspa);
5088 spa_deactivate(newspa);
5089 spa_remove(newspa);
5090
5091 txg = spa_vdev_config_enter(spa);
5092
5093 /* re-online all offlined disks */
5094 for (c = 0; c < children; c++) {
5095 if (vml[c] != NULL)
5096 vml[c]->vdev_offline = B_FALSE;
5097 }
5098 vdev_reopen(spa->spa_root_vdev);
5099
5100 nvlist_free(spa->spa_config_splitting);
5101 spa->spa_config_splitting = NULL;
5102 (void) spa_vdev_exit(spa, NULL, txg, error);
34dc7c2f 5103
428870ff 5104 kmem_free(vml, children * sizeof (vdev_t *));
34dc7c2f
BB
5105 return (error);
5106}
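/*
 * A sketch of the split config expected by spa_vdev_split_mirror(), kept
 * under #if 0 as illustration only: one child nvlist per top-level mirror,
 * each naming (by guid) the leaf that should move to the new pool. The new
 * pool name is a placeholder.
 */
#if 0
static int
example_spa_vdev_split(spa_t *spa, uint64_t leaf_guid)
{
	nvlist_t *config, *tree, *child;
	int error;

	VERIFY(nvlist_alloc(&child, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_uint64(child, ZPOOL_CONFIG_GUID, leaf_guid) == 0);

	VERIFY(nvlist_alloc(&tree, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
	    &child, 1) == 0);

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, tree) == 0);

	/* exp = B_FALSE leaves the new pool imported rather than exported. */
	error = spa_vdev_split_mirror(spa, "example-split", config, NULL,
	    B_FALSE);

	nvlist_free(child);
	nvlist_free(tree);
	nvlist_free(config);
	return (error);
}
#endif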
5107
b128c09f
BB
5108static nvlist_t *
5109spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
34dc7c2f 5110{
d6320ddb
BB
5111 int i;
5112
5113 for (i = 0; i < count; i++) {
b128c09f 5114 uint64_t guid;
34dc7c2f 5115
b128c09f
BB
5116 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5117 &guid) == 0);
34dc7c2f 5118
b128c09f
BB
5119 if (guid == target_guid)
5120 return (nvpp[i]);
34dc7c2f
BB
5121 }
5122
b128c09f 5123 return (NULL);
34dc7c2f
BB
5124}
5125
b128c09f
BB
5126static void
5127spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5128 nvlist_t *dev_to_remove)
34dc7c2f 5129{
b128c09f 5130 nvlist_t **newdev = NULL;
d6320ddb 5131 int i, j;
34dc7c2f 5132
b128c09f 5133 if (count > 1)
b8d06fca 5134 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_PUSHPAGE);
34dc7c2f 5135
d6320ddb 5136 for (i = 0, j = 0; i < count; i++) {
b128c09f
BB
5137 if (dev[i] == dev_to_remove)
5138 continue;
b8d06fca 5139 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_PUSHPAGE) == 0);
34dc7c2f
BB
5140 }
5141
b128c09f
BB
5142 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5143 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
34dc7c2f 5144
d6320ddb 5145 for (i = 0; i < count - 1; i++)
b128c09f 5146 nvlist_free(newdev[i]);
34dc7c2f 5147
b128c09f
BB
5148 if (count > 1)
5149 kmem_free(newdev, (count - 1) * sizeof (void *));
34dc7c2f
BB
5150}
5151
428870ff
BB
5152/*
5153 * Evacuate the device.
5154 */
5155static int
5156spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5157{
5158 uint64_t txg;
5159 int error = 0;
5160
5161 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5162 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5163 ASSERT(vd == vd->vdev_top);
5164
5165 /*
5166 * Evacuate the device. We don't hold the config lock as writer
5167 * since we need to do I/O, but we do keep the spa_namespace_lock
5168 * held. Once this completes, the device
5169 * should no longer have any blocks allocated on it.
5170 */
5171 if (vd->vdev_islog) {
5172 if (vd->vdev_stat.vs_alloc != 0)
5173 error = spa_offline_log(spa);
5174 } else {
2e528b49 5175 error = SET_ERROR(ENOTSUP);
428870ff
BB
5176 }
5177
5178 if (error)
5179 return (error);
5180
5181 /*
5182 * The evacuation succeeded. Remove any remaining MOS metadata
5183 * associated with this vdev, and wait for these changes to sync.
5184 */
c99c9001 5185 ASSERT0(vd->vdev_stat.vs_alloc);
428870ff
BB
5186 txg = spa_vdev_config_enter(spa);
5187 vd->vdev_removing = B_TRUE;
5188 vdev_dirty(vd, 0, NULL, txg);
5189 vdev_config_dirty(vd);
5190 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5191
5192 return (0);
5193}
5194
5195/*
5196 * Complete the removal by cleaning up the namespace.
5197 */
5198static void
5199spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5200{
5201 vdev_t *rvd = spa->spa_root_vdev;
5202 uint64_t id = vd->vdev_id;
5203 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5204
5205 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5206 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5207 ASSERT(vd == vd->vdev_top);
5208
5209 /*
5210 * Only remove devices which are empty.
5211 */
5212 if (vd->vdev_stat.vs_alloc != 0)
5213 return;
5214
5215 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5216
5217 if (list_link_active(&vd->vdev_state_dirty_node))
5218 vdev_state_clean(vd);
5219 if (list_link_active(&vd->vdev_config_dirty_node))
5220 vdev_config_clean(vd);
5221
5222 vdev_free(vd);
5223
5224 if (last_vdev) {
5225 vdev_compact_children(rvd);
5226 } else {
5227 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5228 vdev_add_child(rvd, vd);
5229 }
5230 vdev_config_dirty(rvd);
5231
5232 /*
5233 * Reassess the health of our root vdev.
5234 */
5235 vdev_reopen(rvd);
5236}
5237
5238/*
5239 * Remove a device from the pool -
5240 *
5241 * Removing a device from the vdev namespace requires several steps
5242 * and can take a significant amount of time. As a result we use
5243 * the spa_vdev_config_[enter/exit] functions which allow us to
5244 * grab and release the spa_config_lock while still holding the namespace
5245 * lock. During each step the configuration is synced out.
d3cc8b15
WA
5246 *
5247 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5248 * devices.
34dc7c2f
BB
5249 */
5250int
5251spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5252{
5253 vdev_t *vd;
428870ff 5254 metaslab_group_t *mg;
b128c09f 5255 nvlist_t **spares, **l2cache, *nv;
fb5f0bc8 5256 uint64_t txg = 0;
428870ff 5257 uint_t nspares, nl2cache;
34dc7c2f 5258 int error = 0;
fb5f0bc8 5259 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
34dc7c2f 5260
572e2857
BB
5261 ASSERT(spa_writeable(spa));
5262
fb5f0bc8
BB
5263 if (!locked)
5264 txg = spa_vdev_enter(spa);
34dc7c2f 5265
b128c09f 5266 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
5267
5268 if (spa->spa_spares.sav_vdevs != NULL &&
34dc7c2f 5269 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
b128c09f
BB
5270 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5271 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5272 /*
5273 * Only remove the hot spare if it's not currently in use
5274 * in this pool.
5275 */
5276 if (vd == NULL || unspare) {
5277 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5278 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5279 spa_load_spares(spa);
5280 spa->spa_spares.sav_sync = B_TRUE;
5281 } else {
2e528b49 5282 error = SET_ERROR(EBUSY);
b128c09f
BB
5283 }
5284 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
34dc7c2f 5285 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
b128c09f
BB
5286 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5287 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5288 /*
5289 * Cache devices can always be removed.
5290 */
5291 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5292 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
34dc7c2f
BB
5293 spa_load_l2cache(spa);
5294 spa->spa_l2cache.sav_sync = B_TRUE;
428870ff
BB
5295 } else if (vd != NULL && vd->vdev_islog) {
5296 ASSERT(!locked);
5297 ASSERT(vd == vd->vdev_top);
5298
5299 /*
5300 * XXX - Once we have bp-rewrite this should
5301 * become the common case.
5302 */
5303
5304 mg = vd->vdev_mg;
5305
5306 /*
5307 * Stop allocating from this vdev.
5308 */
5309 metaslab_group_passivate(mg);
5310
5311 /*
5312 * Wait for the youngest allocations and frees to sync,
5313 * and then wait for the deferral of those frees to finish.
5314 */
5315 spa_vdev_config_exit(spa, NULL,
5316 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5317
5318 /*
5319 * Attempt to evacuate the vdev.
5320 */
5321 error = spa_vdev_remove_evacuate(spa, vd);
5322
5323 txg = spa_vdev_config_enter(spa);
5324
5325 /*
5326 * If we couldn't evacuate the vdev, unwind.
5327 */
5328 if (error) {
5329 metaslab_group_activate(mg);
5330 return (spa_vdev_exit(spa, NULL, txg, error));
5331 }
5332
5333 /*
5334 * Clean up the vdev namespace.
5335 */
5336 spa_vdev_remove_from_namespace(spa, vd);
5337
b128c09f
BB
5338 } else if (vd != NULL) {
5339 /*
5340 * Normal vdevs cannot be removed (yet).
5341 */
2e528b49 5342 error = SET_ERROR(ENOTSUP);
b128c09f
BB
5343 } else {
5344 /*
5345 * There is no vdev of any kind with the specified guid.
5346 */
2e528b49 5347 error = SET_ERROR(ENOENT);
34dc7c2f
BB
5348 }
5349
fb5f0bc8
BB
5350 if (!locked)
5351 return (spa_vdev_exit(spa, NULL, txg, error));
5352
5353 return (error);
34dc7c2f
BB
5354}
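/*
 * [Editorial sketch] Userland reaches this entry point through the
 * ZFS_IOC_VDEV_REMOVE ioctl; with libzfs the chain is roughly
 * zpool_vdev_remove() -> ioctl -> spa_vdev_remove().  A minimal
 * illustration under that assumption (error handling elided,
 * pool/device names hypothetical):
 */
#include <libzfs.h>

static int
example_remove_slog(const char *pool, const char *dev)
{
	libzfs_handle_t *g = libzfs_init();
	zpool_handle_t *zhp = zpool_open(g, pool);
	int err = zpool_vdev_remove(zhp, dev);	/* spare, cache, or slog */

	zpool_close(zhp);
	libzfs_fini(g);
	return (err);
}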
5355
5356/*
5357 * Find any device that's done replacing, or a vdev marked 'unspare' that's
d3cc8b15 5358 * currently spared, so we can detach it.
34dc7c2f
BB
5359 */
5360static vdev_t *
5361spa_vdev_resilver_done_hunt(vdev_t *vd)
5362{
5363 vdev_t *newvd, *oldvd;
d6320ddb 5364 int c;
34dc7c2f 5365
d6320ddb 5366 for (c = 0; c < vd->vdev_children; c++) {
34dc7c2f
BB
5367 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5368 if (oldvd != NULL)
5369 return (oldvd);
5370 }
5371
5372 /*
572e2857
BB
5373 * Check for a completed replacement. We always consider the first
5374 * vdev in the list to be the oldest vdev, and the last one to be
5375 * the newest (see spa_vdev_attach() for how that works). In
5376 * the case where the newest vdev is faulted, we will not automatically
5377 * remove it after a resilver completes. This is OK as it will require
5378 * user intervention to determine which disk the admin wishes to keep.
34dc7c2f 5379 */
572e2857
BB
5380 if (vd->vdev_ops == &vdev_replacing_ops) {
5381 ASSERT(vd->vdev_children > 1);
5382
5383 newvd = vd->vdev_child[vd->vdev_children - 1];
34dc7c2f 5384 oldvd = vd->vdev_child[0];
34dc7c2f 5385
fb5f0bc8 5386 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5387 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
fb5f0bc8 5388 !vdev_dtl_required(oldvd))
34dc7c2f 5389 return (oldvd);
34dc7c2f
BB
5390 }
5391
5392 /*
5393 * Check for a completed resilver with the 'unspare' flag set.
5394 */
572e2857
BB
5395 if (vd->vdev_ops == &vdev_spare_ops) {
5396 vdev_t *first = vd->vdev_child[0];
5397 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5398
5399 if (last->vdev_unspare) {
5400 oldvd = first;
5401 newvd = last;
5402 } else if (first->vdev_unspare) {
5403 oldvd = last;
5404 newvd = first;
5405 } else {
5406 oldvd = NULL;
5407 }
34dc7c2f 5408
572e2857 5409 if (oldvd != NULL &&
fb5f0bc8 5410 vdev_dtl_empty(newvd, DTL_MISSING) &&
428870ff 5411 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
572e2857 5412 !vdev_dtl_required(oldvd))
34dc7c2f 5413 return (oldvd);
572e2857
BB
5414
5415 /*
5416 * If there are more than two spares attached to a disk,
5417 * and those spares are not required, then we want to
5418 * attempt to free them up now so that they can be used
5419 * by other pools. Once we're back down to a single
5420 * disk+spare, we stop removing them.
5421 */
5422 if (vd->vdev_children > 2) {
5423 newvd = vd->vdev_child[1];
5424
5425 if (newvd->vdev_isspare && last->vdev_isspare &&
5426 vdev_dtl_empty(last, DTL_MISSING) &&
5427 vdev_dtl_empty(last, DTL_OUTAGE) &&
5428 !vdev_dtl_required(newvd))
5429 return (newvd);
34dc7c2f 5430 }
34dc7c2f
BB
5431 }
5432
5433 return (NULL);
5434}
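/*
 * [Editorial sketch] Both cases above apply the same eligibility test
 * before returning a detach candidate: the surviving vdev must have no
 * missing or outage ranges in its DTLs, and the vdev being detached
 * must not be required for data integrity.  Factored into a
 * hypothetical helper built from the in-tree predicates:
 */
static boolean_t
example_detach_ok(vdev_t *oldvd, vdev_t *newvd)
{
	return (vdev_dtl_empty(newvd, DTL_MISSING) &&
	    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
	    !vdev_dtl_required(oldvd));
}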
5435
5436static void
5437spa_vdev_resilver_done(spa_t *spa)
5438{
fb5f0bc8
BB
5439 vdev_t *vd, *pvd, *ppvd;
5440 uint64_t guid, sguid, pguid, ppguid;
34dc7c2f 5441
fb5f0bc8 5442 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
5443
5444 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
fb5f0bc8
BB
5445 pvd = vd->vdev_parent;
5446 ppvd = pvd->vdev_parent;
34dc7c2f 5447 guid = vd->vdev_guid;
fb5f0bc8
BB
5448 pguid = pvd->vdev_guid;
5449 ppguid = ppvd->vdev_guid;
5450 sguid = 0;
34dc7c2f
BB
5451 /*
5452 * If we have just finished replacing a hot spared device, then
5453 * we need to detach the parent's first child (the original hot
5454 * spare) as well.
5455 */
572e2857
BB
5456 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5457 ppvd->vdev_children == 2) {
34dc7c2f 5458 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
fb5f0bc8 5459 sguid = ppvd->vdev_child[1]->vdev_guid;
34dc7c2f 5460 }
fb5f0bc8
BB
5461 spa_config_exit(spa, SCL_ALL, FTAG);
5462 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
34dc7c2f 5463 return;
fb5f0bc8 5464 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
34dc7c2f 5465 return;
fb5f0bc8 5466 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
5467 }
5468
fb5f0bc8 5469 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
5470}
5471
5472/*
428870ff 5473 * Update the stored path or FRU for this vdev.
34dc7c2f
BB
5474 */
5475int
9babb374
BB
5476spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5477 boolean_t ispath)
34dc7c2f 5478{
b128c09f 5479 vdev_t *vd;
428870ff 5480 boolean_t sync = B_FALSE;
34dc7c2f 5481
572e2857
BB
5482 ASSERT(spa_writeable(spa));
5483
428870ff 5484 spa_vdev_state_enter(spa, SCL_ALL);
34dc7c2f 5485
9babb374 5486 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
428870ff 5487 return (spa_vdev_state_exit(spa, NULL, ENOENT));
34dc7c2f
BB
5488
5489 if (!vd->vdev_ops->vdev_op_leaf)
428870ff 5490 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
34dc7c2f 5491
9babb374 5492 if (ispath) {
428870ff
BB
5493 if (strcmp(value, vd->vdev_path) != 0) {
5494 spa_strfree(vd->vdev_path);
5495 vd->vdev_path = spa_strdup(value);
5496 sync = B_TRUE;
5497 }
9babb374 5498 } else {
428870ff
BB
5499 if (vd->vdev_fru == NULL) {
5500 vd->vdev_fru = spa_strdup(value);
5501 sync = B_TRUE;
5502 } else if (strcmp(value, vd->vdev_fru) != 0) {
9babb374 5503 spa_strfree(vd->vdev_fru);
428870ff
BB
5504 vd->vdev_fru = spa_strdup(value);
5505 sync = B_TRUE;
5506 }
9babb374 5507 }
34dc7c2f 5508
428870ff 5509 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
34dc7c2f
BB
5510}
5511
9babb374
BB
5512int
5513spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5514{
5515 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5516}
5517
5518int
5519spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5520{
5521 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5522}
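/*
 * [Editorial sketch] Both wrappers above funnel into
 * spa_vdev_set_common(); only the ispath flag differs.  A hypothetical
 * kernel-context use, recording a new path for a leaf vdev after its
 * device node moved (the guid and path are invented for illustration):
 */
static int
example_update_path(spa_t *spa, uint64_t leaf_guid)
{
	return (spa_vdev_setpath(spa, leaf_guid,
	    "/dev/disk/by-id/ata-EXAMPLE"));
}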
5523
34dc7c2f
BB
5524/*
5525 * ==========================================================================
428870ff 5526 * SPA Scanning
34dc7c2f
BB
5527 * ==========================================================================
5528 */
5529
34dc7c2f 5530int
428870ff
BB
5531spa_scan_stop(spa_t *spa)
5532{
5533 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5534 if (dsl_scan_resilvering(spa->spa_dsl_pool))
2e528b49 5535 return (SET_ERROR(EBUSY));
428870ff
BB
5536 return (dsl_scan_cancel(spa->spa_dsl_pool));
5537}
5538
5539int
5540spa_scan(spa_t *spa, pool_scan_func_t func)
34dc7c2f 5541{
b128c09f 5542 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
34dc7c2f 5543
428870ff 5544 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
2e528b49 5545 return (SET_ERROR(ENOTSUP));
34dc7c2f 5546
34dc7c2f 5547 /*
b128c09f
BB
5548 * If a resilver was requested, but there is no DTL on a
5549 * writeable leaf device, we have nothing to do.
34dc7c2f 5550 */
428870ff 5551 if (func == POOL_SCAN_RESILVER &&
b128c09f
BB
5552 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5553 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
34dc7c2f
BB
5554 return (0);
5555 }
5556
428870ff 5557 return (dsl_scan(spa->spa_dsl_pool, func));
34dc7c2f
BB
5558}
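/*
 * [Editorial sketch] A "zpool scrub" request lands here as
 * spa_scan(spa, POOL_SCAN_SCRUB), and "zpool scrub -s" goes through
 * spa_scan_stop(spa).  Hypothetical kernel-context usage:
 */
static int
example_start_scrub(spa_t *spa)
{
	return (spa_scan(spa, POOL_SCAN_SCRUB));
}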
5559
5560/*
5561 * ==========================================================================
5562 * SPA async task processing
5563 * ==========================================================================
5564 */
5565
5566static void
5567spa_async_remove(spa_t *spa, vdev_t *vd)
5568{
d6320ddb
BB
5569 int c;
5570
b128c09f 5571 if (vd->vdev_remove_wanted) {
428870ff
BB
5572 vd->vdev_remove_wanted = B_FALSE;
5573 vd->vdev_delayed_close = B_FALSE;
b128c09f 5574 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
428870ff
BB
5575
5576 /*
5577 * We want to clear the stats, but we don't want to do a full
5578 * vdev_clear() as that will cause us to throw away
5579 * degraded/faulted state as well as attempt to reopen the
5580 * device, all of which is a waste.
5581 */
5582 vd->vdev_stat.vs_read_errors = 0;
5583 vd->vdev_stat.vs_write_errors = 0;
5584 vd->vdev_stat.vs_checksum_errors = 0;
5585
b128c09f
BB
5586 vdev_state_dirty(vd->vdev_top);
5587 }
34dc7c2f 5588
d6320ddb 5589 for (c = 0; c < vd->vdev_children; c++)
b128c09f
BB
5590 spa_async_remove(spa, vd->vdev_child[c]);
5591}
5592
5593static void
5594spa_async_probe(spa_t *spa, vdev_t *vd)
5595{
d6320ddb
BB
5596 int c;
5597
b128c09f 5598 if (vd->vdev_probe_wanted) {
428870ff 5599 vd->vdev_probe_wanted = B_FALSE;
b128c09f 5600 vdev_reopen(vd); /* vdev_open() does the actual probe */
34dc7c2f 5601 }
b128c09f 5602
d6320ddb 5603 for (c = 0; c < vd->vdev_children; c++)
b128c09f 5604 spa_async_probe(spa, vd->vdev_child[c]);
34dc7c2f
BB
5605}
5606
9babb374
BB
5607static void
5608spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5609{
d6320ddb 5610 int c;
9babb374
BB
5611
5612 if (!spa->spa_autoexpand)
5613 return;
5614
d6320ddb 5615 for (c = 0; c < vd->vdev_children; c++) {
9babb374
BB
5616 vdev_t *cvd = vd->vdev_child[c];
5617 spa_async_autoexpand(spa, cvd);
5618 }
5619
5620 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5621 return;
5622
26685276 5623 spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_AUTOEXPAND);
9babb374
BB
5624}
5625
34dc7c2f
BB
5626static void
5627spa_async_thread(spa_t *spa)
5628{
d6320ddb 5629 int tasks, i;
34dc7c2f
BB
5630
5631 ASSERT(spa->spa_sync_on);
5632
5633 mutex_enter(&spa->spa_async_lock);
5634 tasks = spa->spa_async_tasks;
5635 spa->spa_async_tasks = 0;
5636 mutex_exit(&spa->spa_async_lock);
5637
5638 /*
5639 * See if the config needs to be updated.
5640 */
5641 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
428870ff 5642 uint64_t old_space, new_space;
9babb374 5643
34dc7c2f 5644 mutex_enter(&spa_namespace_lock);
428870ff 5645 old_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 5646 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
428870ff 5647 new_space = metaslab_class_get_space(spa_normal_class(spa));
34dc7c2f 5648 mutex_exit(&spa_namespace_lock);
9babb374
BB
5649
5650 /*
5651 * If the pool grew as a result of the config update,
5652 * then log an internal history event.
5653 */
428870ff 5654 if (new_space != old_space) {
6f1ffb06 5655 spa_history_log_internal(spa, "vdev online", NULL,
45d1cae3 5656 "pool '%s' size: %llu(+%llu)",
428870ff 5657 spa_name(spa), new_space, new_space - old_space);
9babb374 5658 }
34dc7c2f
BB
5659 }
5660
5661 /*
5662 * See if any devices need to be marked REMOVED.
34dc7c2f 5663 */
b128c09f 5664 if (tasks & SPA_ASYNC_REMOVE) {
428870ff 5665 spa_vdev_state_enter(spa, SCL_NONE);
34dc7c2f 5666 spa_async_remove(spa, spa->spa_root_vdev);
d6320ddb 5667 for (i = 0; i < spa->spa_l2cache.sav_count; i++)
b128c09f 5668 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
d6320ddb 5669 for (i = 0; i < spa->spa_spares.sav_count; i++)
b128c09f
BB
5670 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5671 (void) spa_vdev_state_exit(spa, NULL, 0);
34dc7c2f
BB
5672 }
5673
9babb374
BB
5674 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5675 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5676 spa_async_autoexpand(spa, spa->spa_root_vdev);
5677 spa_config_exit(spa, SCL_CONFIG, FTAG);
5678 }
5679
34dc7c2f 5680 /*
b128c09f 5681 * See if any devices need to be probed.
34dc7c2f 5682 */
b128c09f 5683 if (tasks & SPA_ASYNC_PROBE) {
428870ff 5684 spa_vdev_state_enter(spa, SCL_NONE);
b128c09f
BB
5685 spa_async_probe(spa, spa->spa_root_vdev);
5686 (void) spa_vdev_state_exit(spa, NULL, 0);
5687 }
34dc7c2f
BB
5688
5689 /*
b128c09f 5690 * If any devices are done replacing, detach them.
34dc7c2f 5691 */
b128c09f
BB
5692 if (tasks & SPA_ASYNC_RESILVER_DONE)
5693 spa_vdev_resilver_done(spa);
34dc7c2f
BB
5694
5695 /*
5696 * Kick off a resilver.
5697 */
b128c09f 5698 if (tasks & SPA_ASYNC_RESILVER)
428870ff 5699 dsl_resilver_restart(spa->spa_dsl_pool, 0);
34dc7c2f
BB
5700
5701 /*
5702 * Let the world know that we're done.
5703 */
5704 mutex_enter(&spa->spa_async_lock);
5705 spa->spa_async_thread = NULL;
5706 cv_broadcast(&spa->spa_async_cv);
5707 mutex_exit(&spa->spa_async_lock);
5708 thread_exit();
5709}
5710
5711void
5712spa_async_suspend(spa_t *spa)
5713{
5714 mutex_enter(&spa->spa_async_lock);
5715 spa->spa_async_suspended++;
5716 while (spa->spa_async_thread != NULL)
5717 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5718 mutex_exit(&spa->spa_async_lock);
5719}
5720
5721void
5722spa_async_resume(spa_t *spa)
5723{
5724 mutex_enter(&spa->spa_async_lock);
5725 ASSERT(spa->spa_async_suspended != 0);
5726 spa->spa_async_suspended--;
5727 mutex_exit(&spa->spa_async_lock);
5728}
5729
5730static void
5731spa_async_dispatch(spa_t *spa)
5732{
5733 mutex_enter(&spa->spa_async_lock);
5734 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
5735 spa->spa_async_thread == NULL &&
5736 rootdir != NULL && !vn_is_readonly(rootdir))
5737 spa->spa_async_thread = thread_create(NULL, 0,
5738 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5739 mutex_exit(&spa->spa_async_lock);
5740}
5741
5742void
5743spa_async_request(spa_t *spa, int task)
5744{
428870ff 5745 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
34dc7c2f
BB
5746 mutex_enter(&spa->spa_async_lock);
5747 spa->spa_async_tasks |= task;
5748 mutex_exit(&spa->spa_async_lock);
5749}
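/*
 * [Editorial sketch, standalone analogue] The async machinery above is
 * a "task bitmask plus single worker" pattern: requesters OR a bit
 * into spa_async_tasks under the lock, the dispatcher spawns one
 * worker thread when none is running, and suspend waits on the CV
 * until the worker exits.  A compilable userland miniature with
 * pthreads (all names invented for illustration):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ex_cv = PTHREAD_COND_INITIALIZER;
static int ex_tasks;
static int ex_running;

static void *
ex_worker(void *arg)
{
	int t;

	(void) arg;
	pthread_mutex_lock(&ex_lock);
	t = ex_tasks;		/* snapshot and clear, cf. spa_async_thread() */
	ex_tasks = 0;
	pthread_mutex_unlock(&ex_lock);

	if (t & 1)
		printf("task: config update\n");
	if (t & 2)
		printf("task: probe\n");

	pthread_mutex_lock(&ex_lock);
	ex_running = 0;
	pthread_cond_broadcast(&ex_cv);	/* wake suspenders, cf. spa_async_cv */
	pthread_mutex_unlock(&ex_lock);
	return (NULL);
}

static void
ex_request(int task)
{
	pthread_t tid;

	pthread_mutex_lock(&ex_lock);
	ex_tasks |= task;
	if (!ex_running) {		/* cf. spa_async_dispatch() */
		ex_running = 1;
		(void) pthread_create(&tid, NULL, ex_worker, NULL);
		(void) pthread_detach(tid);
	}
	pthread_mutex_unlock(&ex_lock);
}

int
main(void)
{
	ex_request(1 | 2);
	pthread_mutex_lock(&ex_lock);
	while (ex_running)		/* drain, cf. spa_async_suspend() */
		pthread_cond_wait(&ex_cv, &ex_lock);
	pthread_mutex_unlock(&ex_lock);
	return (0);
}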
5750
5751/*
5752 * ==========================================================================
5753 * SPA syncing routines
5754 * ==========================================================================
5755 */
5756
428870ff
BB
5757static int
5758bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
34dc7c2f 5759{
428870ff
BB
5760 bpobj_t *bpo = arg;
5761 bpobj_enqueue(bpo, bp, tx);
5762 return (0);
5763}
34dc7c2f 5764
428870ff
BB
5765static int
5766spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5767{
5768 zio_t *zio = arg;
34dc7c2f 5769
428870ff
BB
5770 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5771 zio->io_flags));
5772 return (0);
34dc7c2f
BB
5773}
5774
5775static void
5776spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5777{
5778 char *packed = NULL;
b128c09f 5779 size_t bufsize;
34dc7c2f
BB
5780 size_t nvsize = 0;
5781 dmu_buf_t *db;
5782
5783 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5784
b128c09f
BB
5785 /*
5786 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5787 * information. This avoids the dbuf_will_dirty() path and
5788 * saves us a pre-read to get data we don't actually care about.
5789 */
9ae529ec 5790 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
b8d06fca 5791 packed = vmem_alloc(bufsize, KM_PUSHPAGE);
34dc7c2f
BB
5792
5793 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
b8d06fca 5794 KM_PUSHPAGE) == 0);
b128c09f 5795 bzero(packed + nvsize, bufsize - nvsize);
34dc7c2f 5796
b128c09f 5797 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
34dc7c2f 5798
00b46022 5799 vmem_free(packed, bufsize);
34dc7c2f
BB
5800
5801 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5802 dmu_buf_will_dirty(db, tx);
5803 *(uint64_t *)db->db_data = nvsize;
5804 dmu_buf_rele(db, FTAG);
5805}
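/*
 * [Editorial sketch] spa_sync_nvlist() above stores the nvlist as a
 * packed XDR stream, padded out to full SPA_CONFIG_BLOCKSIZE blocks so
 * the dmu_write() never needs a read-modify-write; the unpacked size
 * lives in the bonus buffer.  The pack-and-pad half, in standalone
 * libnvpair form (16384 stands in for SPA_CONFIG_BLOCKSIZE, and the
 * "comment" pair is invented):
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *nv;
	char *packed;
	size_t nvsize = 0, bufsize;

	assert(nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) == 0);
	assert(nvlist_add_string(nv, "comment", "hello") == 0);

	assert(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
	bufsize = (nvsize + 16383) & ~(size_t)16383;	/* cf. P2ROUNDUP */
	packed = malloc(bufsize);
	assert(packed != NULL);

	assert(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 0) == 0);
	memset(packed + nvsize, 0, bufsize - nvsize);	/* cf. bzero() */

	/* spa_sync_nvlist() would dmu_write() the padded buffer here. */
	free(packed);
	nvlist_free(nv);
	return (0);
}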
5806
5807static void
5808spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5809 const char *config, const char *entry)
5810{
5811 nvlist_t *nvroot;
5812 nvlist_t **list;
5813 int i;
5814
5815 if (!sav->sav_sync)
5816 return;
5817
5818 /*
5819 * Update the MOS nvlist describing the list of available devices.
5820 * spa_validate_aux() will have already made sure this nvlist is
5821 * valid and the vdevs are labeled appropriately.
5822 */
5823 if (sav->sav_object == 0) {
5824 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5825 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5826 sizeof (uint64_t), tx);
5827 VERIFY(zap_update(spa->spa_meta_objset,
5828 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5829 &sav->sav_object, tx) == 0);
5830 }
5831
b8d06fca 5832 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
5833 if (sav->sav_count == 0) {
5834 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5835 } else {
b8d06fca 5836 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
34dc7c2f
BB
5837 for (i = 0; i < sav->sav_count; i++)
5838 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
428870ff 5839 B_FALSE, VDEV_CONFIG_L2CACHE);
34dc7c2f
BB
5840 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
5841 sav->sav_count) == 0);
5842 for (i = 0; i < sav->sav_count; i++)
5843 nvlist_free(list[i]);
5844 kmem_free(list, sav->sav_count * sizeof (void *));
5845 }
5846
5847 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
5848 nvlist_free(nvroot);
5849
5850 sav->sav_sync = B_FALSE;
5851}
5852
5853static void
5854spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
5855{
5856 nvlist_t *config;
5857
b128c09f 5858 if (list_is_empty(&spa->spa_config_dirty_list))
34dc7c2f
BB
5859 return;
5860
b128c09f
BB
5861 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5862
5863 config = spa_config_generate(spa, spa->spa_root_vdev,
5864 dmu_tx_get_txg(tx), B_FALSE);
5865
ea0b2538
GW
5866 /*
5867 * If we're upgrading the spa version then make sure that
5868 * the config object gets updated with the correct version.
5869 */
5870 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
5871 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5872 spa->spa_uberblock.ub_version);
5873
b128c09f 5874 spa_config_exit(spa, SCL_STATE, FTAG);
34dc7c2f
BB
5875
5876 if (spa->spa_config_syncing)
5877 nvlist_free(spa->spa_config_syncing);
5878 spa->spa_config_syncing = config;
5879
5880 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5881}
5882
9ae529ec 5883static void
13fe0198 5884spa_sync_version(void *arg, dmu_tx_t *tx)
9ae529ec 5885{
13fe0198
MA
5886 uint64_t *versionp = arg;
5887 uint64_t version = *versionp;
5888 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9ae529ec
CS
5889
5890 /*
5891 * Setting the version is special cased when first creating the pool.
5892 */
5893 ASSERT(tx->tx_txg != TXG_INITIAL);
5894
8dca0a9a 5895 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
9ae529ec
CS
5896 ASSERT(version >= spa_version(spa));
5897
5898 spa->spa_uberblock.ub_version = version;
5899 vdev_config_dirty(spa->spa_root_vdev);
6f1ffb06 5900 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
9ae529ec
CS
5901}
5902
34dc7c2f
BB
5903/*
5904 * Set zpool properties.
5905 */
5906static void
13fe0198 5907spa_sync_props(void *arg, dmu_tx_t *tx)
34dc7c2f 5908{
13fe0198
MA
5909 nvlist_t *nvp = arg;
5910 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
34dc7c2f 5911 objset_t *mos = spa->spa_meta_objset;
9ae529ec 5912 nvpair_t *elem = NULL;
b128c09f
BB
5913
5914 mutex_enter(&spa->spa_props_lock);
34dc7c2f 5915
34dc7c2f 5916 while ((elem = nvlist_next_nvpair(nvp, elem))) {
9ae529ec
CS
5917 uint64_t intval;
5918 char *strval, *fname;
5919 zpool_prop_t prop;
5920 const char *propname;
5921 zprop_type_t proptype;
5922 zfeature_info_t *feature;
5923
5924 prop = zpool_name_to_prop(nvpair_name(elem));
5925 switch ((int)prop) {
5926 case ZPROP_INVAL:
5927 /*
5928 * We checked this earlier in spa_prop_validate().
5929 */
5930 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5931
5932 fname = strchr(nvpair_name(elem), '@') + 1;
5933 VERIFY3U(0, ==, zfeature_lookup_name(fname, &feature));
5934
5935 spa_feature_enable(spa, feature, tx);
6f1ffb06
MA
5936 spa_history_log_internal(spa, "set", tx,
5937 "%s=enabled", nvpair_name(elem));
9ae529ec
CS
5938 break;
5939
34dc7c2f 5940 case ZPOOL_PROP_VERSION:
9ae529ec 5941 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
34dc7c2f 5942 /*
9ae529ec
CS
 5943 * The version is synced separately before other
5944 * properties and should be correct by now.
34dc7c2f 5945 */
9ae529ec 5946 ASSERT3U(spa_version(spa), >=, intval);
34dc7c2f
BB
5947 break;
5948
5949 case ZPOOL_PROP_ALTROOT:
5950 /*
5951 * 'altroot' is a non-persistent property. It should
5952 * have been set temporarily at creation or import time.
5953 */
5954 ASSERT(spa->spa_root != NULL);
5955 break;
5956
572e2857 5957 case ZPOOL_PROP_READONLY:
34dc7c2f
BB
5958 case ZPOOL_PROP_CACHEFILE:
5959 /*
572e2857
BB
 5960 * 'readonly' and 'cachefile' are also non-persistent
5961 * properties.
34dc7c2f 5962 */
34dc7c2f 5963 break;
d96eb2b1
DM
5964 case ZPOOL_PROP_COMMENT:
5965 VERIFY(nvpair_value_string(elem, &strval) == 0);
5966 if (spa->spa_comment != NULL)
5967 spa_strfree(spa->spa_comment);
5968 spa->spa_comment = spa_strdup(strval);
5969 /*
5970 * We need to dirty the configuration on all the vdevs
5971 * so that their labels get updated. It's unnecessary
5972 * to do this for pool creation since the vdev's
 5973 * configuration has already been dirtied.
5974 */
5975 if (tx->tx_txg != TXG_INITIAL)
5976 vdev_config_dirty(spa->spa_root_vdev);
6f1ffb06
MA
5977 spa_history_log_internal(spa, "set", tx,
5978 "%s=%s", nvpair_name(elem), strval);
d96eb2b1 5979 break;
34dc7c2f
BB
5980 default:
5981 /*
5982 * Set pool property values in the poolprops mos object.
5983 */
34dc7c2f 5984 if (spa->spa_pool_props_object == 0) {
9ae529ec
CS
5985 spa->spa_pool_props_object =
5986 zap_create_link(mos, DMU_OT_POOL_PROPS,
34dc7c2f 5987 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
9ae529ec 5988 tx);
34dc7c2f 5989 }
34dc7c2f
BB
5990
5991 /* normalize the property name */
5992 propname = zpool_prop_to_name(prop);
5993 proptype = zpool_prop_get_type(prop);
5994
5995 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5996 ASSERT(proptype == PROP_TYPE_STRING);
5997 VERIFY(nvpair_value_string(elem, &strval) == 0);
5998 VERIFY(zap_update(mos,
5999 spa->spa_pool_props_object, propname,
6000 1, strlen(strval) + 1, strval, tx) == 0);
6f1ffb06
MA
6001 spa_history_log_internal(spa, "set", tx,
6002 "%s=%s", nvpair_name(elem), strval);
34dc7c2f
BB
6003 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6004 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
6005
6006 if (proptype == PROP_TYPE_INDEX) {
6007 const char *unused;
6008 VERIFY(zpool_prop_index_to_string(
6009 prop, intval, &unused) == 0);
6010 }
6011 VERIFY(zap_update(mos,
6012 spa->spa_pool_props_object, propname,
6013 8, 1, &intval, tx) == 0);
6f1ffb06
MA
6014 spa_history_log_internal(spa, "set", tx,
6015 "%s=%lld", nvpair_name(elem), intval);
34dc7c2f
BB
6016 } else {
6017 ASSERT(0); /* not allowed */
6018 }
6019
6020 switch (prop) {
6021 case ZPOOL_PROP_DELEGATION:
6022 spa->spa_delegation = intval;
6023 break;
6024 case ZPOOL_PROP_BOOTFS:
6025 spa->spa_bootfs = intval;
6026 break;
6027 case ZPOOL_PROP_FAILUREMODE:
6028 spa->spa_failmode = intval;
6029 break;
9babb374
BB
6030 case ZPOOL_PROP_AUTOEXPAND:
6031 spa->spa_autoexpand = intval;
428870ff
BB
6032 if (tx->tx_txg != TXG_INITIAL)
6033 spa_async_request(spa,
6034 SPA_ASYNC_AUTOEXPAND);
6035 break;
6036 case ZPOOL_PROP_DEDUPDITTO:
6037 spa->spa_dedup_ditto = intval;
9babb374 6038 break;
34dc7c2f
BB
6039 default:
6040 break;
6041 }
6042 }
6043
34dc7c2f 6044 }
b128c09f
BB
6045
6046 mutex_exit(&spa->spa_props_lock);
34dc7c2f
BB
6047}
6048
428870ff
BB
6049/*
6050 * Perform one-time upgrade on-disk changes. spa_version() does not
6051 * reflect the new version this txg, so there must be no changes this
6052 * txg to anything that the upgrade code depends on after it executes.
6053 * Therefore this must be called after dsl_pool_sync() does the sync
6054 * tasks.
6055 */
6056static void
6057spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6058{
6059 dsl_pool_t *dp = spa->spa_dsl_pool;
6060
6061 ASSERT(spa->spa_sync_pass == 1);
6062
13fe0198
MA
6063 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6064
428870ff
BB
6065 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6066 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6067 dsl_pool_create_origin(dp, tx);
6068
6069 /* Keeping the origin open increases spa_minref */
6070 spa->spa_minref += 3;
6071 }
6072
6073 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6074 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6075 dsl_pool_upgrade_clones(dp, tx);
6076 }
6077
6078 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6079 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6080 dsl_pool_upgrade_dir_clones(dp, tx);
6081
6082 /* Keeping the freedir open increases spa_minref */
6083 spa->spa_minref += 3;
6084 }
9ae529ec
CS
6085
6086 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6087 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6088 spa_feature_create_zap_objects(spa, tx);
6089 }
13fe0198 6090 rrw_exit(&dp->dp_config_rwlock, FTAG);
428870ff
BB
6091}
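/*
 * [Editorial sketch] Each upgrade above runs exactly once because the
 * gate compares the last-synced version (spa_ubsync) against the
 * version being synced (spa_uberblock): only the txg that crosses a
 * feature's version boundary executes its one-time setup.  The
 * pattern, abstracted with a hypothetical helper name:
 */
static void
example_one_time_upgrade(spa_t *spa, uint64_t feature_ver, dmu_tx_t *tx)
{
	if (spa->spa_ubsync.ub_version < feature_ver &&
	    spa->spa_uberblock.ub_version >= feature_ver) {
		/* one-time on-disk setup for this feature, using tx */
	}
}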
6092
34dc7c2f
BB
6093/*
6094 * Sync the specified transaction group. New blocks may be dirtied as
6095 * part of the process, so we iterate until it converges.
6096 */
6097void
6098spa_sync(spa_t *spa, uint64_t txg)
6099{
6100 dsl_pool_t *dp = spa->spa_dsl_pool;
6101 objset_t *mos = spa->spa_meta_objset;
428870ff
BB
6102 bpobj_t *defer_bpo = &spa->spa_deferred_bpobj;
6103 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
34dc7c2f
BB
6104 vdev_t *rvd = spa->spa_root_vdev;
6105 vdev_t *vd;
34dc7c2f 6106 dmu_tx_t *tx;
b128c09f 6107 int error;
d6320ddb 6108 int c;
34dc7c2f 6109
572e2857
BB
6110 VERIFY(spa_writeable(spa));
6111
34dc7c2f
BB
6112 /*
6113 * Lock out configuration changes.
6114 */
b128c09f 6115 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f
BB
6116
6117 spa->spa_syncing_txg = txg;
6118 spa->spa_sync_pass = 0;
6119
b128c09f
BB
6120 /*
6121 * If there are any pending vdev state changes, convert them
6122 * into config changes that go out with this transaction group.
6123 */
6124 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
fb5f0bc8
BB
6125 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6126 /*
6127 * We need the write lock here because, for aux vdevs,
6128 * calling vdev_config_dirty() modifies sav_config.
6129 * This is ugly and will become unnecessary when we
6130 * eliminate the aux vdev wart by integrating all vdevs
6131 * into the root vdev tree.
6132 */
6133 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6134 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6135 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6136 vdev_state_clean(vd);
6137 vdev_config_dirty(vd);
6138 }
6139 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6140 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
b128c09f
BB
6141 }
6142 spa_config_exit(spa, SCL_STATE, FTAG);
6143
34dc7c2f
BB
6144 tx = dmu_tx_create_assigned(dp, txg);
6145
cc92e9d0
GW
6146 spa->spa_sync_starttime = gethrtime();
6147 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
6148 spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
cbfa294d 6149 spa_deadman, spa, TQ_PUSHPAGE, ddi_get_lbolt() +
cc92e9d0
GW
6150 NSEC_TO_TICK(spa->spa_deadman_synctime));
6151
34dc7c2f
BB
6152 /*
6153 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6154 * set spa_deflate if we have no raid-z vdevs.
6155 */
6156 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6157 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6158 int i;
6159
6160 for (i = 0; i < rvd->vdev_children; i++) {
6161 vd = rvd->vdev_child[i];
6162 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6163 break;
6164 }
6165 if (i == rvd->vdev_children) {
6166 spa->spa_deflate = TRUE;
6167 VERIFY(0 == zap_add(spa->spa_meta_objset,
6168 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6169 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6170 }
6171 }
6172
6173 /*
428870ff
BB
6174 * If anything has changed in this txg, or if someone is waiting
6175 * for this txg to sync (eg, spa_vdev_remove()), push the
6176 * deferred frees from the previous txg. If not, leave them
6177 * alone so that we don't generate work on an otherwise idle
6178 * system.
34dc7c2f
BB
6179 */
6180 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
6181 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
428870ff
BB
6182 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
6183 ((dsl_scan_active(dp->dp_scan) ||
6184 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
6185 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6186 VERIFY3U(bpobj_iterate(defer_bpo,
6187 spa_free_sync_cb, zio, tx), ==, 0);
c99c9001 6188 VERIFY0(zio_wait(zio));
428870ff 6189 }
34dc7c2f
BB
6190
6191 /*
6192 * Iterate to convergence.
6193 */
6194 do {
428870ff 6195 int pass = ++spa->spa_sync_pass;
34dc7c2f
BB
6196
6197 spa_sync_config_object(spa, tx);
6198 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6199 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6200 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6201 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6202 spa_errlog_sync(spa, txg);
6203 dsl_pool_sync(dp, txg);
6204
55d85d5a 6205 if (pass < zfs_sync_pass_deferred_free) {
428870ff
BB
6206 zio_t *zio = zio_root(spa, NULL, NULL, 0);
6207 bplist_iterate(free_bpl, spa_free_sync_cb,
6208 zio, tx);
6209 VERIFY(zio_wait(zio) == 0);
6210 } else {
6211 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6212 defer_bpo, tx);
34dc7c2f
BB
6213 }
6214
428870ff
BB
6215 ddt_sync(spa, txg);
6216 dsl_scan_sync(dp, tx);
34dc7c2f 6217
c65aa5b2 6218 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
428870ff
BB
6219 vdev_sync(vd, txg);
6220
6221 if (pass == 1)
6222 spa_sync_upgrades(spa, tx);
34dc7c2f 6223
428870ff 6224 } while (dmu_objset_is_dirty(mos, txg));
34dc7c2f
BB
6225
6226 /*
6227 * Rewrite the vdev configuration (which includes the uberblock)
6228 * to commit the transaction group.
6229 *
6230 * If there are no dirty vdevs, we sync the uberblock to a few
6231 * random top-level vdevs that are known to be visible in the
b128c09f
BB
6232 * config cache (see spa_vdev_add() for a complete description).
6233 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
34dc7c2f 6234 */
b128c09f
BB
6235 for (;;) {
6236 /*
6237 * We hold SCL_STATE to prevent vdev open/close/etc.
6238 * while we're attempting to write the vdev labels.
6239 */
6240 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6241
6242 if (list_is_empty(&spa->spa_config_dirty_list)) {
6243 vdev_t *svd[SPA_DVAS_PER_BP];
6244 int svdcount = 0;
6245 int children = rvd->vdev_children;
6246 int c0 = spa_get_random(children);
b128c09f 6247
d6320ddb 6248 for (c = 0; c < children; c++) {
b128c09f
BB
6249 vd = rvd->vdev_child[(c0 + c) % children];
6250 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6251 continue;
6252 svd[svdcount++] = vd;
6253 if (svdcount == SPA_DVAS_PER_BP)
6254 break;
6255 }
9babb374
BB
6256 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6257 if (error != 0)
6258 error = vdev_config_sync(svd, svdcount, txg,
6259 B_TRUE);
b128c09f
BB
6260 } else {
6261 error = vdev_config_sync(rvd->vdev_child,
9babb374
BB
6262 rvd->vdev_children, txg, B_FALSE);
6263 if (error != 0)
6264 error = vdev_config_sync(rvd->vdev_child,
6265 rvd->vdev_children, txg, B_TRUE);
34dc7c2f 6266 }
34dc7c2f 6267
3bc7e0fb
GW
6268 if (error == 0)
6269 spa->spa_last_synced_guid = rvd->vdev_guid;
6270
b128c09f
BB
6271 spa_config_exit(spa, SCL_STATE, FTAG);
6272
6273 if (error == 0)
6274 break;
6275 zio_suspend(spa, NULL);
6276 zio_resume_wait(spa);
6277 }
34dc7c2f
BB
6278 dmu_tx_commit(tx);
6279
cc92e9d0
GW
6280 taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
6281 spa->spa_deadman_tqid = 0;
6282
34dc7c2f
BB
6283 /*
6284 * Clear the dirty config list.
6285 */
b128c09f 6286 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
34dc7c2f
BB
6287 vdev_config_clean(vd);
6288
6289 /*
6290 * Now that the new config has synced transactionally,
6291 * let it become visible to the config cache.
6292 */
6293 if (spa->spa_config_syncing != NULL) {
6294 spa_config_set(spa, spa->spa_config_syncing);
6295 spa->spa_config_txg = txg;
6296 spa->spa_config_syncing = NULL;
6297 }
6298
34dc7c2f 6299 spa->spa_ubsync = spa->spa_uberblock;
34dc7c2f 6300
428870ff 6301 dsl_pool_sync_done(dp, txg);
34dc7c2f
BB
6302
6303 /*
6304 * Update usable space statistics.
6305 */
c65aa5b2 6306 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
34dc7c2f
BB
6307 vdev_sync_done(vd, txg);
6308
428870ff
BB
6309 spa_update_dspace(spa);
6310
34dc7c2f
BB
6311 /*
6312 * It had better be the case that we didn't dirty anything
6313 * since vdev_config_sync().
6314 */
6315 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
6316 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6317 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
428870ff
BB
6318
6319 spa->spa_sync_pass = 0;
34dc7c2f 6320
b128c09f 6321 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f 6322
428870ff
BB
6323 spa_handle_ignored_writes(spa);
6324
34dc7c2f
BB
6325 /*
6326 * If any async tasks have been requested, kick them off.
6327 */
6328 spa_async_dispatch(spa);
6329}
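/*
 * [Editorial sketch, standalone analogue] The heart of spa_sync()
 * above is a fixed-point loop: writing out dirty state can dirty more
 * state (space maps, MOS objects), so passes repeat until an iteration
 * leaves the MOS clean.  A miniature of that control flow:
 */
#include <stdio.h>

int
main(void)
{
	int dirty = 5;		/* pretend five dirty objects at txg start */
	int pass = 0;

	do {
		pass++;
		dirty /= 2;	/* each pass retires work but may create more */
		printf("pass %d: %d object(s) still dirty\n", pass, dirty);
	} while (dirty != 0);	/* cf. dmu_objset_is_dirty(mos, txg) */
	return (0);
}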
6330
6331/*
6332 * Sync all pools. We don't want to hold the namespace lock across these
6333 * operations, so we take a reference on the spa_t and drop the lock during the
6334 * sync.
6335 */
6336void
6337spa_sync_allpools(void)
6338{
6339 spa_t *spa = NULL;
6340 mutex_enter(&spa_namespace_lock);
6341 while ((spa = spa_next(spa)) != NULL) {
572e2857
BB
6342 if (spa_state(spa) != POOL_STATE_ACTIVE ||
6343 !spa_writeable(spa) || spa_suspended(spa))
34dc7c2f
BB
6344 continue;
6345 spa_open_ref(spa, FTAG);
6346 mutex_exit(&spa_namespace_lock);
6347 txg_wait_synced(spa_get_dsl(spa), 0);
6348 mutex_enter(&spa_namespace_lock);
6349 spa_close(spa, FTAG);
6350 }
6351 mutex_exit(&spa_namespace_lock);
6352}
6353
6354/*
6355 * ==========================================================================
6356 * Miscellaneous routines
6357 * ==========================================================================
6358 */
6359
6360/*
6361 * Remove all pools in the system.
6362 */
6363void
6364spa_evict_all(void)
6365{
6366 spa_t *spa;
6367
6368 /*
6369 * Remove all cached state. All pools should be closed now,
6370 * so every spa in the AVL tree should be unreferenced.
6371 */
6372 mutex_enter(&spa_namespace_lock);
6373 while ((spa = spa_next(NULL)) != NULL) {
6374 /*
6375 * Stop async tasks. The async thread may need to detach
6376 * a device that's been replaced, which requires grabbing
6377 * spa_namespace_lock, so we must drop it here.
6378 */
6379 spa_open_ref(spa, FTAG);
6380 mutex_exit(&spa_namespace_lock);
6381 spa_async_suspend(spa);
6382 mutex_enter(&spa_namespace_lock);
34dc7c2f
BB
6383 spa_close(spa, FTAG);
6384
6385 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6386 spa_unload(spa);
6387 spa_deactivate(spa);
6388 }
6389 spa_remove(spa);
6390 }
6391 mutex_exit(&spa_namespace_lock);
6392}
6393
6394vdev_t *
9babb374 6395spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
34dc7c2f 6396{
b128c09f
BB
6397 vdev_t *vd;
6398 int i;
6399
6400 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
6401 return (vd);
6402
9babb374 6403 if (aux) {
b128c09f
BB
6404 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
6405 vd = spa->spa_l2cache.sav_vdevs[i];
9babb374
BB
6406 if (vd->vdev_guid == guid)
6407 return (vd);
6408 }
6409
6410 for (i = 0; i < spa->spa_spares.sav_count; i++) {
6411 vd = spa->spa_spares.sav_vdevs[i];
b128c09f
BB
6412 if (vd->vdev_guid == guid)
6413 return (vd);
6414 }
6415 }
6416
6417 return (NULL);
34dc7c2f
BB
6418}
6419
6420void
6421spa_upgrade(spa_t *spa, uint64_t version)
6422{
572e2857
BB
6423 ASSERT(spa_writeable(spa));
6424
b128c09f 6425 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f
BB
6426
6427 /*
6428 * This should only be called for a non-faulted pool, and since a
6429 * future version would result in an unopenable pool, this shouldn't be
6430 * possible.
6431 */
8dca0a9a 6432 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
34dc7c2f
BB
6433 ASSERT(version >= spa->spa_uberblock.ub_version);
6434
6435 spa->spa_uberblock.ub_version = version;
6436 vdev_config_dirty(spa->spa_root_vdev);
6437
b128c09f 6438 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
6439
6440 txg_wait_synced(spa_get_dsl(spa), 0);
6441}
6442
6443boolean_t
6444spa_has_spare(spa_t *spa, uint64_t guid)
6445{
6446 int i;
6447 uint64_t spareguid;
6448 spa_aux_vdev_t *sav = &spa->spa_spares;
6449
6450 for (i = 0; i < sav->sav_count; i++)
6451 if (sav->sav_vdevs[i]->vdev_guid == guid)
6452 return (B_TRUE);
6453
6454 for (i = 0; i < sav->sav_npending; i++) {
6455 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6456 &spareguid) == 0 && spareguid == guid)
6457 return (B_TRUE);
6458 }
6459
6460 return (B_FALSE);
6461}
6462
b128c09f
BB
6463/*
6464 * Check if a pool has an active shared spare device.
 6465 * Note: the reference count of an active spare is 2, as a spare and as a replacement
6466 */
6467static boolean_t
6468spa_has_active_shared_spare(spa_t *spa)
6469{
6470 int i, refcnt;
6471 uint64_t pool;
6472 spa_aux_vdev_t *sav = &spa->spa_spares;
6473
6474 for (i = 0; i < sav->sav_count; i++) {
6475 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6476 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6477 refcnt > 2)
6478 return (B_TRUE);
6479 }
6480
6481 return (B_FALSE);
6482}
6483
34dc7c2f 6484/*
26685276 6485 * Post a FM_EREPORT_ZFS_* event from sys/fm/fs/zfs.h. The payload will be
34dc7c2f
BB
6486 * filled in from the spa and (optionally) the vdev. This doesn't do anything
6487 * in the userland libzpool, as we don't want consumers to misinterpret ztest
6488 * or zdb as real changes.
6489 */
6490void
6491spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6492{
6493#ifdef _KERNEL
26685276 6494 zfs_ereport_post(name, spa, vd, NULL, 0, 0);
34dc7c2f
BB
6495#endif
6496}
c28b2279
BB
6497
6498#if defined(_KERNEL) && defined(HAVE_SPL)
6499/* state manipulation functions */
6500EXPORT_SYMBOL(spa_open);
6501EXPORT_SYMBOL(spa_open_rewind);
6502EXPORT_SYMBOL(spa_get_stats);
6503EXPORT_SYMBOL(spa_create);
6504EXPORT_SYMBOL(spa_import_rootpool);
6505EXPORT_SYMBOL(spa_import);
6506EXPORT_SYMBOL(spa_tryimport);
6507EXPORT_SYMBOL(spa_destroy);
6508EXPORT_SYMBOL(spa_export);
6509EXPORT_SYMBOL(spa_reset);
6510EXPORT_SYMBOL(spa_async_request);
6511EXPORT_SYMBOL(spa_async_suspend);
6512EXPORT_SYMBOL(spa_async_resume);
6513EXPORT_SYMBOL(spa_inject_addref);
6514EXPORT_SYMBOL(spa_inject_delref);
6515EXPORT_SYMBOL(spa_scan_stat_init);
6516EXPORT_SYMBOL(spa_scan_get_stats);
6517
 6518/* device manipulation */
6519EXPORT_SYMBOL(spa_vdev_add);
6520EXPORT_SYMBOL(spa_vdev_attach);
6521EXPORT_SYMBOL(spa_vdev_detach);
6522EXPORT_SYMBOL(spa_vdev_remove);
6523EXPORT_SYMBOL(spa_vdev_setpath);
6524EXPORT_SYMBOL(spa_vdev_setfru);
6525EXPORT_SYMBOL(spa_vdev_split_mirror);
6526
 6527/* spare state (which is global across all pools) */
6528EXPORT_SYMBOL(spa_spare_add);
6529EXPORT_SYMBOL(spa_spare_remove);
6530EXPORT_SYMBOL(spa_spare_exists);
6531EXPORT_SYMBOL(spa_spare_activate);
6532
 6533/* L2ARC state (which is global across all pools) */
6534EXPORT_SYMBOL(spa_l2cache_add);
6535EXPORT_SYMBOL(spa_l2cache_remove);
6536EXPORT_SYMBOL(spa_l2cache_exists);
6537EXPORT_SYMBOL(spa_l2cache_activate);
6538EXPORT_SYMBOL(spa_l2cache_drop);
6539
6540/* scanning */
6541EXPORT_SYMBOL(spa_scan);
6542EXPORT_SYMBOL(spa_scan_stop);
6543
6544/* spa syncing */
6545EXPORT_SYMBOL(spa_sync); /* only for DMU use */
6546EXPORT_SYMBOL(spa_sync_allpools);
6547
6548/* properties */
6549EXPORT_SYMBOL(spa_prop_set);
6550EXPORT_SYMBOL(spa_prop_get);
6551EXPORT_SYMBOL(spa_prop_clear_bootfs);
6552
6553/* asynchronous event notification */
6554EXPORT_SYMBOL(spa_event_notify);
6555#endif