/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/zvol.h>

#ifdef _KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif /* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"
typedef enum zti_modes {
        ZTI_MODE_FIXED,                 /* value is # of threads (min 1) */
        ZTI_MODE_ONLINE_PERCENT,        /* value is % of online CPUs */
        ZTI_MODE_BATCH,                 /* cpu-intensive; value is ignored */
        ZTI_MODE_NULL,                  /* don't create a taskq */
        ZTI_NMODES
} zti_modes_t;

#define ZTI_P(n, q)     { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n)      { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH       { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_NULL        { ZTI_MODE_NULL, 0, 0 }

#define ZTI_N(n)        ZTI_P(n, 1)
#define ZTI_ONE         ZTI_N(1)

typedef struct zio_taskq_info {
        zti_modes_t zti_mode;
        uint_t zti_value;
        uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
        "iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
        /* ISSUE        ISSUE_HIGH      INTR            INTR_HIGH */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* NULL */
        { ZTI_N(8),     ZTI_NULL,       ZTI_BATCH,      ZTI_NULL }, /* READ */
        { ZTI_BATCH,    ZTI_N(5),       ZTI_N(16),      ZTI_N(5) }, /* WRITE */
        { ZTI_P(4, 8),  ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* FREE */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* CLAIM */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* IOCTL */
};
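
/*
 * Illustrative sketch (standalone userland program, not part of this file):
 * how one row of the table above decodes.  EX_P/EX_N simply mirror the
 * ZTI_P/ZTI_N macros; all "EX_"/"ex_" names are hypothetical.
 */
#include <stdio.h>

typedef struct ex_zti {
        unsigned ez_value;      /* threads per taskq */
        unsigned ez_count;      /* number of discrete taskqs */
} ex_zti_t;

#define EX_P(n, q)      { (n), (q) }    /* like ZTI_P: n threads x q taskqs */
#define EX_N(n)         EX_P(n, 1)      /* like ZTI_N: one taskq, n threads */

int
main(void)
{
        ex_zti_t free_iss = EX_P(4, 8); /* FREE/ISSUE row above */
        ex_zti_t read_iss = EX_N(8);    /* READ/ISSUE row above */

        (void) printf("FREE issue: %u taskqs of %u threads\n",
            free_iss.ez_count, free_iss.ez_value);
        (void) printf("READ issue: %u taskqs of %u threads\n",
            read_iss.ez_count, read_iss.ez_value);
        return (0);
}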

static dsl_syncfunc_t spa_sync_version;
static dsl_syncfunc_t spa_sync_props;
static dsl_checkfunc_t spa_change_guid_check;
static dsl_syncfunc_t spa_change_guid_sync;
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t          zio_taskq_batch_pct = 100;      /* 1 thread per cpu in pset */
id_t            zio_taskq_psrset_bind = PS_NONE;
boolean_t       zio_taskq_sysdc = B_TRUE;       /* use SDC scheduling class */
uint_t          zio_taskq_basedc = 80;          /* base duty cycle */

boolean_t       spa_create_process = B_TRUE;    /* no process ==> no sysdc */

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define TRYIMPORT_NAME  "$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
        const char *propname = zpool_prop_to_name(prop);
        nvlist_t *propval;

        VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
        VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

        if (strval != NULL)
                VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
        else
                VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

        VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
        nvlist_free(propval);
}
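
/*
 * Illustrative sketch (standalone userland program, assuming libnvpair;
 * not part of this file): the nested shape spa_prop_add_list() builds is
 *
 *      "<propname>" -> { "source" = <src>, "value" = <strval or intval> }
 *
 * assert() and literal key strings stand in for the kernel VERIFY and
 * ZPROP_* macros; ex_prop_shape() is a hypothetical name.
 */
#include <assert.h>
#include <stdio.h>
#include <libnvpair.h>

static void
ex_prop_shape(void)
{
        nvlist_t *nvl, *propval;

        assert(nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0);
        assert(nvlist_alloc(&propval, NV_UNIQUE_NAME, 0) == 0);
        assert(nvlist_add_uint64(propval, "source", 0) == 0);
        assert(nvlist_add_uint64(propval, "value", 42) == 0);
        assert(nvlist_add_nvlist(nvl, "capacity", propval) == 0);
        nvlist_free(propval);           /* nvl keeps its own copy */
        nvlist_print(stdout, nvl);      /* dumps the nested pair */
        nvlist_free(nvl);
}

int
main(void)
{
        ex_prop_shape();
        return (0);
}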

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
        vdev_t *rvd = spa->spa_root_vdev;
        dsl_pool_t *pool = spa->spa_dsl_pool;
        uint64_t size;
        uint64_t alloc;
        uint64_t space;
        uint64_t cap, version;
        zprop_source_t src = ZPROP_SRC_NONE;
        spa_config_dirent_t *dp;
        int c;

        ASSERT(MUTEX_HELD(&spa->spa_props_lock));

        if (rvd != NULL) {
                alloc = metaslab_class_get_alloc(spa_normal_class(spa));
                size = metaslab_class_get_space(spa_normal_class(spa));
                spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
                    size - alloc, src);

                space = 0;
                for (c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *tvd = rvd->vdev_child[c];
                        space += tvd->vdev_max_asize - tvd->vdev_asize;
                }
                spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
                    src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
                    (spa_mode(spa) == FREAD), src);

                cap = (size == 0) ? 0 : (alloc * 100 / size);
                spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
                    ddt_get_pool_dedup_ratio(spa), src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
                    rvd->vdev_state, src);

                version = spa_version(spa);
                if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
                        src = ZPROP_SRC_DEFAULT;
                else
                        src = ZPROP_SRC_LOCAL;
                spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
        }

        if (pool != NULL) {
                dsl_dir_t *freedir = pool->dp_free_dir;

                /*
                 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
                 * when opening pools that predate this version, freedir will
                 * be NULL.
                 */
                if (freedir != NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
                            freedir->dd_phys->dd_used_bytes, src);
                } else {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
                            NULL, 0, src);
                }
        }

        spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

        if (spa->spa_comment != NULL) {
                spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
                    0, ZPROP_SRC_LOCAL);
        }

        if (spa->spa_root != NULL)
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
                    0, ZPROP_SRC_LOCAL);

        if ((dp = list_head(&spa->spa_config_list)) != NULL) {
                if (dp->scd_path == NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
                            "none", 0, ZPROP_SRC_LOCAL);
                } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
                            dp->scd_path, 0, ZPROP_SRC_LOCAL);
                }
        }
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
        objset_t *mos = spa->spa_meta_objset;
        zap_cursor_t zc;
        zap_attribute_t za;
        int err;

        err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE);
        if (err)
                return (err);

        mutex_enter(&spa->spa_props_lock);

        /*
         * Get properties from the spa config.
         */
        spa_prop_get_config(spa, nvp);

        /* If no pool property object, no more prop to get. */
        if (mos == NULL || spa->spa_pool_props_object == 0) {
                mutex_exit(&spa->spa_props_lock);
                goto out;
        }

        /*
         * Get properties from the MOS pool property object.
         */
        for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
            (err = zap_cursor_retrieve(&zc, &za)) == 0;
            zap_cursor_advance(&zc)) {
                uint64_t intval = 0;
                char *strval = NULL;
                zprop_source_t src = ZPROP_SRC_DEFAULT;
                zpool_prop_t prop;

                if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
                        continue;

                switch (za.za_integer_length) {
                case 8:
                        /* integer property */
                        if (za.za_first_integer !=
                            zpool_prop_default_numeric(prop))
                                src = ZPROP_SRC_LOCAL;

                        if (prop == ZPOOL_PROP_BOOTFS) {
                                dsl_pool_t *dp;
                                dsl_dataset_t *ds = NULL;

                                dp = spa_get_dsl(spa);
                                rw_enter(&dp->dp_config_rwlock, RW_READER);
                                if ((err = dsl_dataset_hold_obj(dp,
                                    za.za_first_integer, FTAG, &ds))) {
                                        rw_exit(&dp->dp_config_rwlock);
                                        break;
                                }

                                strval = kmem_alloc(
                                    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
                                    KM_PUSHPAGE);
                                dsl_dataset_name(ds, strval);
                                dsl_dataset_rele(ds, FTAG);
                                rw_exit(&dp->dp_config_rwlock);
                        } else {
                                strval = NULL;
                                intval = za.za_first_integer;
                        }

                        spa_prop_add_list(*nvp, prop, strval, intval, src);

                        if (strval != NULL)
                                kmem_free(strval,
                                    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

                        break;

                case 1:
                        /* string property */
                        strval = kmem_alloc(za.za_num_integers, KM_PUSHPAGE);
                        err = zap_lookup(mos, spa->spa_pool_props_object,
                            za.za_name, 1, za.za_num_integers, strval);
                        if (err) {
                                kmem_free(strval, za.za_num_integers);
                                break;
                        }
                        spa_prop_add_list(*nvp, prop, strval, 0, src);
                        kmem_free(strval, za.za_num_integers);
                        break;

                default:
                        break;
                }
        }
        zap_cursor_fini(&zc);
        mutex_exit(&spa->spa_props_lock);
out:
        if (err && err != ENOENT) {
                nvlist_free(*nvp);
                *nvp = NULL;
                return (err);
        }

        return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
        nvpair_t *elem;
        int error = 0, reset_bootfs = 0;
        uint64_t objnum = 0;
        boolean_t has_feature = B_FALSE;

        elem = NULL;
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                uint64_t intval;
                char *strval, *slash, *check, *fname;
                const char *propname = nvpair_name(elem);
                zpool_prop_t prop = zpool_name_to_prop(propname);

                switch ((int)prop) {
                case ZPROP_INVAL:
                        if (!zpool_prop_feature(propname)) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Sanitize the input.
                         */
                        if (nvpair_type(elem) != DATA_TYPE_UINT64) {
                                error = EINVAL;
                                break;
                        }

                        if (nvpair_value_uint64(elem, &intval) != 0) {
                                error = EINVAL;
                                break;
                        }

                        if (intval != 0) {
                                error = EINVAL;
                                break;
                        }

                        fname = strchr(propname, '@') + 1;
                        if (zfeature_lookup_name(fname, NULL) != 0) {
                                error = EINVAL;
                                break;
                        }

                        has_feature = B_TRUE;
                        break;

                case ZPOOL_PROP_VERSION:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error &&
                            (intval < spa_version(spa) ||
                            intval > SPA_VERSION_BEFORE_FEATURES ||
                            has_feature))
                                error = EINVAL;
                        break;

                case ZPOOL_PROP_DELEGATION:
                case ZPOOL_PROP_AUTOREPLACE:
                case ZPOOL_PROP_LISTSNAPS:
                case ZPOOL_PROP_AUTOEXPAND:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && intval > 1)
                                error = EINVAL;
                        break;

                case ZPOOL_PROP_BOOTFS:
                        /*
                         * If the pool version is less than SPA_VERSION_BOOTFS,
                         * or the pool is still being created (version == 0),
                         * the bootfs property cannot be set.
                         */
                        if (spa_version(spa) < SPA_VERSION_BOOTFS) {
                                error = ENOTSUP;
                                break;
                        }

                        /*
                         * Make sure the vdev config is bootable
                         */
                        if (!vdev_is_bootable(spa->spa_root_vdev)) {
                                error = ENOTSUP;
                                break;
                        }

                        reset_bootfs = 1;

                        error = nvpair_value_string(elem, &strval);

                        if (!error) {
                                objset_t *os;
                                uint64_t compress;

                                if (strval == NULL || strval[0] == '\0') {
                                        objnum = zpool_prop_default_numeric(
                                            ZPOOL_PROP_BOOTFS);
                                        break;
                                }

                                if ((error = dmu_objset_hold(strval, FTAG,
                                    &os)))
                                        break;

                                /* Must be ZPL and not gzip compressed. */

                                if (dmu_objset_type(os) != DMU_OST_ZFS) {
                                        error = ENOTSUP;
                                } else if ((error = dsl_prop_get_integer(strval,
                                    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
                                    &compress, NULL)) == 0 &&
                                    !BOOTFS_COMPRESS_VALID(compress)) {
                                        error = ENOTSUP;
                                } else {
                                        objnum = dmu_objset_id(os);
                                }
                                dmu_objset_rele(os, FTAG);
                        }
                        break;

                case ZPOOL_PROP_FAILUREMODE:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
                            intval > ZIO_FAILURE_MODE_PANIC))
                                error = EINVAL;

                        /*
                         * This is a special case which only occurs when
                         * the pool has completely failed. This allows
                         * the user to change the in-core failmode property
                         * without syncing it out to disk (I/Os might
                         * currently be blocked). We do this by returning
                         * EIO to the caller (spa_prop_set) to trick it
                         * into thinking we encountered a property validation
                         * error.
                         */
                        if (!error && spa_suspended(spa)) {
                                spa->spa_failmode = intval;
                                error = EIO;
                        }
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        if ((error = nvpair_value_string(elem, &strval)) != 0)
                                break;

                        if (strval[0] == '\0')
                                break;

                        if (strcmp(strval, "none") == 0)
                                break;

                        if (strval[0] != '/') {
                                error = EINVAL;
                                break;
                        }

                        slash = strrchr(strval, '/');
                        ASSERT(slash != NULL);

                        if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                            strcmp(slash, "/..") == 0)
                                error = EINVAL;
                        break;

                case ZPOOL_PROP_COMMENT:
                        if ((error = nvpair_value_string(elem, &strval)) != 0)
                                break;
                        /*
                         * The loop header's check++ is the only increment;
                         * a second increment in the body would skip every
                         * other character.
                         */
                        for (check = strval; *check != '\0'; check++) {
                                if (!isprint(*check)) {
                                        error = EINVAL;
                                        break;
                                }
                        }
                        if (strlen(strval) > ZPROP_MAX_COMMENT)
                                error = E2BIG;
                        break;

                case ZPOOL_PROP_DEDUPDITTO:
                        if (spa_version(spa) < SPA_VERSION_DEDUP)
                                error = ENOTSUP;
                        else
                                error = nvpair_value_uint64(elem, &intval);
                        if (error == 0 &&
                            intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
                                error = EINVAL;
                        break;

                default:
                        break;
                }

                if (error)
                        break;
        }

        if (!error && reset_bootfs) {
                error = nvlist_remove(props,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

                if (!error) {
                        error = nvlist_add_uint64(props,
                            zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
                }
        }

        return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
        char *cachefile;
        spa_config_dirent_t *dp;

        if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
            &cachefile) != 0)
                return;

        dp = kmem_alloc(sizeof (spa_config_dirent_t),
            KM_PUSHPAGE);

        if (cachefile[0] == '\0')
                dp->scd_path = spa_strdup(spa_config_path);
        else if (strcmp(cachefile, "none") == 0)
                dp->scd_path = NULL;
        else
                dp->scd_path = spa_strdup(cachefile);

        list_insert_head(&spa->spa_config_list, dp);
        if (need_sync)
                spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
        int error;
        nvpair_t *elem = NULL;
        boolean_t need_sync = B_FALSE;

        if ((error = spa_prop_validate(spa, nvp)) != 0)
                return (error);

        while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
                zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

                if (prop == ZPOOL_PROP_CACHEFILE ||
                    prop == ZPOOL_PROP_ALTROOT ||
                    prop == ZPOOL_PROP_READONLY)
                        continue;

                if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
                        uint64_t ver;

                        if (prop == ZPOOL_PROP_VERSION) {
                                VERIFY(nvpair_value_uint64(elem, &ver) == 0);
                        } else {
                                ASSERT(zpool_prop_feature(nvpair_name(elem)));
                                ver = SPA_VERSION_FEATURES;
                                need_sync = B_TRUE;
                        }

                        /* Save time if the version is already set. */
                        if (ver == spa_version(spa))
                                continue;

                        /*
                         * In addition to the pool directory object, we might
                         * create the pool properties object, the features for
                         * read object, the features for write object, or the
                         * feature descriptions object.
                         */
                        error = dsl_sync_task_do(spa_get_dsl(spa), NULL,
                            spa_sync_version, spa, &ver, 6);
                        if (error)
                                return (error);
                        continue;
                }

                need_sync = B_TRUE;
                break;
        }

        if (need_sync) {
                return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
                    spa, nvp, 6));
        }

        return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
        if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
                VERIFY(zap_remove(spa->spa_meta_objset,
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
                spa->spa_bootfs = 0;
        }
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        spa_t *spa = arg1;
        vdev_t *rvd = spa->spa_root_vdev;
        uint64_t vdev_state;
        ASSERTV(uint64_t *newguid = arg2);

        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        vdev_state = rvd->vdev_state;
        spa_config_exit(spa, SCL_STATE, FTAG);

        if (vdev_state != VDEV_STATE_HEALTHY)
                return (ENXIO);

        ASSERT3U(spa_guid(spa), !=, *newguid);

        return (0);
}

static void
spa_change_guid_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        spa_t *spa = arg1;
        uint64_t *newguid = arg2;
        uint64_t oldguid;
        vdev_t *rvd = spa->spa_root_vdev;

        oldguid = spa_guid(spa);

        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        rvd->vdev_guid = *newguid;
        rvd->vdev_guid_sum += (*newguid - oldguid);
        vdev_config_dirty(rvd);
        spa_config_exit(spa, SCL_STATE, FTAG);

        spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
            oldguid, *newguid);
}

/*
 * Change the GUID for the pool. This is done so that we can later
 * re-import a pool built from a clone of our own vdevs. We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty. Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool. We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
        int error;
        uint64_t guid;

        mutex_enter(&spa_namespace_lock);
        guid = spa_generate_guid(NULL);

        error = dsl_sync_task_do(spa_get_dsl(spa), spa_change_guid_check,
            spa_change_guid_sync, spa, &guid, 5);

        if (error == 0) {
                spa_config_sync(spa, B_FALSE, B_TRUE);
                spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_REGUID);
        }

        mutex_exit(&spa_namespace_lock);

        return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
        spa_error_entry_t *sa = (spa_error_entry_t *)a;
        spa_error_entry_t *sb = (spa_error_entry_t *)b;
        int ret;

        ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
            sizeof (zbookmark_t));

        if (ret < 0)
                return (-1);
        else if (ret > 0)
                return (1);
        else
                return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
        ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

        bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
        bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}
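
/*
 * Illustrative sketch (standalone userland fragment, assuming the illumos
 * AVL library; not part of this file): the avl_create() pattern used
 * above -- a comparator returning -1/0/1 plus the offset of an embedded
 * avl_node_t. All "ex_" names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <sys/avl.h>

typedef struct ex_entry {
        int             ee_key;
        avl_node_t      ee_avl;         /* embedded linkage, like se_avl */
} ex_entry_t;

static int
ex_compare(const void *a, const void *b)
{
        const ex_entry_t *ea = a;
        const ex_entry_t *eb = b;

        if (ea->ee_key < eb->ee_key)
                return (-1);
        if (ea->ee_key > eb->ee_key)
                return (1);
        return (0);
}

static void
ex_avl_usage(void)
{
        avl_tree_t tree;
        ex_entry_t e1 = { 1 }, e2 = { 2 };

        avl_create(&tree, ex_compare, sizeof (ex_entry_t),
            offsetof(ex_entry_t, ee_avl));
        avl_add(&tree, &e1);
        avl_add(&tree, &e2);
        assert(avl_numnodes(&tree) == 2);
        avl_remove(&tree, &e1);
        avl_remove(&tree, &e2);
        avl_destroy(&tree);
}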

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
        const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
        enum zti_modes mode = ztip->zti_mode;
        uint_t value = ztip->zti_value;
        uint_t count = ztip->zti_count;
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        char name[32];
        uint_t i, flags = 0;
        boolean_t batch = B_FALSE;

        if (mode == ZTI_MODE_NULL) {
                tqs->stqs_count = 0;
                tqs->stqs_taskq = NULL;
                return;
        }

        ASSERT3U(count, >, 0);

        tqs->stqs_count = count;
        tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

        for (i = 0; i < count; i++) {
                taskq_t *tq;

                switch (mode) {
                case ZTI_MODE_FIXED:
                        ASSERT3U(value, >=, 1);
                        value = MAX(value, 1);
                        break;

                case ZTI_MODE_BATCH:
                        batch = B_TRUE;
                        flags |= TASKQ_THREADS_CPU_PCT;
                        value = zio_taskq_batch_pct;
                        break;

                case ZTI_MODE_ONLINE_PERCENT:
                        flags |= TASKQ_THREADS_CPU_PCT;
                        break;

                default:
                        panic("unrecognized mode for %s_%s taskq (%u:%u) in "
                            "spa_activate()",
                            zio_type_name[t], zio_taskq_types[q], mode, value);
                        break;
                }

                if (count > 1) {
                        (void) snprintf(name, sizeof (name), "%s_%s_%u",
                            zio_type_name[t], zio_taskq_types[q], i);
                } else {
                        (void) snprintf(name, sizeof (name), "%s_%s",
                            zio_type_name[t], zio_taskq_types[q]);
                }

                if (zio_taskq_sysdc && spa->spa_proc != &p0) {
                        if (batch)
                                flags |= TASKQ_DC_BATCH;

                        tq = taskq_create_sysdc(name, value, 50, INT_MAX,
                            spa->spa_proc, zio_taskq_basedc, flags);
                } else {
                        tq = taskq_create_proc(name, value, maxclsyspri, 50,
                            INT_MAX, spa->spa_proc, flags);
                }

                tqs->stqs_taskq[i] = tq;
        }
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        uint_t i;

        if (tqs->stqs_taskq == NULL) {
                ASSERT3U(tqs->stqs_count, ==, 0);
                return;
        }

        for (i = 0; i < tqs->stqs_count; i++) {
                ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
                taskq_destroy(tqs->stqs_taskq[i]);
        }

        kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
        tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        taskq_t *tq;

        ASSERT3P(tqs->stqs_taskq, !=, NULL);
        ASSERT3U(tqs->stqs_count, !=, 0);

        if (tqs->stqs_count == 1) {
                tq = tqs->stqs_taskq[0];
        } else {
                tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
        }

        taskq_dispatch_ent(tq, func, arg, flags, ent);
}

/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        taskq_t *tq;
        taskqid_t id;

        ASSERT3P(tqs->stqs_taskq, !=, NULL);
        ASSERT3U(tqs->stqs_count, !=, 0);

        if (tqs->stqs_count == 1) {
                tq = tqs->stqs_taskq[0];
        } else {
                tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
        }

        id = taskq_dispatch(tq, func, arg, flags);
        if (id)
                taskq_wait_id(tq, id);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
        int t, q;

        for (t = 0; t < ZIO_TYPES; t++) {
                for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
                        spa_taskqs_init(spa, t, q);
                }
        }
}

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
        callb_cpr_t cprinfo;

        spa_t *spa = arg;
        user_t *pu = PTOU(curproc);

        CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
            spa->spa_name);

        ASSERT(curproc != &p0);
        (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
            "zpool-%s", spa->spa_name);
        (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

        /* bind this thread to the requested psrset */
        if (zio_taskq_psrset_bind != PS_NONE) {
                pool_lock();
                mutex_enter(&cpu_lock);
                mutex_enter(&pidlock);
                mutex_enter(&curproc->p_lock);

                if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
                    0, NULL, NULL) == 0) {
                        curthread->t_bind_pset = zio_taskq_psrset_bind;
                } else {
                        cmn_err(CE_WARN,
                            "Couldn't bind process for zfs pool \"%s\" to "
                            "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
                }

                mutex_exit(&curproc->p_lock);
                mutex_exit(&pidlock);
                mutex_exit(&cpu_lock);
                pool_unlock();
        }

        if (zio_taskq_sysdc) {
                sysdc_thread_enter(curthread, 100, 0);
        }

        spa->spa_proc = curproc;
        spa->spa_did = curthread->t_did;

        spa_create_zio_taskqs(spa);

        mutex_enter(&spa->spa_proc_lock);
        ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

        spa->spa_proc_state = SPA_PROC_ACTIVE;
        cv_broadcast(&spa->spa_proc_cv);

        CALLB_CPR_SAFE_BEGIN(&cprinfo);
        while (spa->spa_proc_state == SPA_PROC_ACTIVE)
                cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
        CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

        ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
        spa->spa_proc_state = SPA_PROC_GONE;
        spa->spa_proc = &p0;
        cv_broadcast(&spa->spa_proc_cv);
        CALLB_CPR_EXIT(&cprinfo);       /* drops spa_proc_lock */

        mutex_enter(&curproc->p_lock);
        lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        spa->spa_state = POOL_STATE_ACTIVE;
        spa->spa_mode = mode;

        spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
        spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

        /* Try to create a covering process */
        mutex_enter(&spa->spa_proc_lock);
        ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
        ASSERT(spa->spa_proc == &p0);
        spa->spa_did = 0;

#ifdef HAVE_SPA_THREAD
        /* Only create a process if we're going to be around a while. */
        if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
                if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
                    NULL, 0) == 0) {
                        spa->spa_proc_state = SPA_PROC_CREATED;
                        while (spa->spa_proc_state == SPA_PROC_CREATED) {
                                cv_wait(&spa->spa_proc_cv,
                                    &spa->spa_proc_lock);
                        }
                        ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
                        ASSERT(spa->spa_proc != &p0);
                        ASSERT(spa->spa_did != 0);
                } else {
#ifdef _KERNEL
                        cmn_err(CE_WARN,
                            "Couldn't create process for zfs pool \"%s\"\n",
                            spa->spa_name);
#endif
                }
        }
#endif /* HAVE_SPA_THREAD */
        mutex_exit(&spa->spa_proc_lock);

        /* If we didn't create a process, we need to create our taskqs. */
        if (spa->spa_proc == &p0) {
                spa_create_zio_taskqs(spa);
        }

        list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_config_dirty_node));
        list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_state_dirty_node));

        txg_list_create(&spa->spa_vdev_txg_list,
            offsetof(struct vdev, vdev_txg_node));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
        int t, q;

        ASSERT(spa->spa_sync_on == B_FALSE);
        ASSERT(spa->spa_dsl_pool == NULL);
        ASSERT(spa->spa_root_vdev == NULL);
        ASSERT(spa->spa_async_zio_root == NULL);
        ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

        txg_list_destroy(&spa->spa_vdev_txg_list);

        list_destroy(&spa->spa_config_dirty_list);
        list_destroy(&spa->spa_state_dirty_list);

        taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);

        for (t = 0; t < ZIO_TYPES; t++) {
                for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
                        spa_taskqs_fini(spa, t, q);
                }
        }

        metaslab_class_destroy(spa->spa_normal_class);
        spa->spa_normal_class = NULL;

        metaslab_class_destroy(spa->spa_log_class);
        spa->spa_log_class = NULL;

        /*
         * If this was part of an import or the open otherwise failed, we may
         * still have errors left in the queues. Empty them just in case.
         */
        spa_errlog_drain(spa);

        avl_destroy(&spa->spa_errlist_scrub);
        avl_destroy(&spa->spa_errlist_last);

        spa->spa_state = POOL_STATE_UNINITIALIZED;

        mutex_enter(&spa->spa_proc_lock);
        if (spa->spa_proc_state != SPA_PROC_NONE) {
                ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
                spa->spa_proc_state = SPA_PROC_DEACTIVATE;
                cv_broadcast(&spa->spa_proc_cv);
                while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
                        ASSERT(spa->spa_proc != &p0);
                        cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
                }
                ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
                spa->spa_proc_state = SPA_PROC_NONE;
        }
        ASSERT(spa->spa_proc == &p0);
        mutex_exit(&spa->spa_proc_lock);

        /*
         * We want to make sure spa_thread() has actually exited the ZFS
         * module, so that the module can't be unloaded out from underneath
         * it.
         */
        if (spa->spa_did != 0) {
                thread_join(spa->spa_did);
                spa->spa_did = 0;
        }
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
        nvlist_t **child;
        uint_t children;
        int error;
        int c;

        if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
                return (error);

        if ((*vdp)->vdev_ops->vdev_op_leaf)
                return (0);

        error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children);

        if (error == ENOENT)
                return (0);

        if (error) {
                vdev_free(*vdp);
                *vdp = NULL;
                return (EINVAL);
        }

        for (c = 0; c < children; c++) {
                vdev_t *vd;
                if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
                    atype)) != 0) {
                        vdev_free(*vdp);
                        *vdp = NULL;
                        return (error);
                }
        }

        ASSERT(*vdp != NULL);

        return (0);
}
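
/*
 * Illustrative sketch (standalone userland fragment, assuming libnvpair;
 * not part of this file): the same recursive descent spa_config_parse()
 * performs over ZPOOL_CONFIG_CHILDREN, here just counting leaf vdevs.
 * ex_count_leaves() is a hypothetical helper.
 */
#include <libnvpair.h>

static unsigned
ex_count_leaves(nvlist_t *nv)
{
        nvlist_t **child;
        uint_t children, c;
        unsigned n = 0;

        /* ZPOOL_CONFIG_CHILDREN is the literal key "children" */
        if (nvlist_lookup_nvlist_array(nv, "children",
            &child, &children) != 0)
                return (1);             /* no children array: a leaf vdev */

        for (c = 0; c < children; c++)
                n += ex_count_leaves(child[c]);
        return (n);
}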

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
        int i;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        /*
         * Stop async tasks.
         */
        spa_async_suspend(spa);

        /*
         * Stop syncing.
         */
        if (spa->spa_sync_on) {
                txg_sync_stop(spa->spa_dsl_pool);
                spa->spa_sync_on = B_FALSE;
        }

        /*
         * Wait for any outstanding async I/O to complete.
         */
        if (spa->spa_async_zio_root != NULL) {
                (void) zio_wait(spa->spa_async_zio_root);
                spa->spa_async_zio_root = NULL;
        }

        bpobj_close(&spa->spa_deferred_bpobj);

        /*
         * Close the dsl pool.
         */
        if (spa->spa_dsl_pool) {
                dsl_pool_close(spa->spa_dsl_pool);
                spa->spa_dsl_pool = NULL;
                spa->spa_meta_objset = NULL;
        }

        ddt_unload(spa);

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        /*
         * Drop and purge level 2 cache
         */
        spa_l2cache_drop(spa);

        /*
         * Close all vdevs.
         */
        if (spa->spa_root_vdev)
                vdev_free(spa->spa_root_vdev);
        ASSERT(spa->spa_root_vdev == NULL);

        for (i = 0; i < spa->spa_spares.sav_count; i++)
                vdev_free(spa->spa_spares.sav_vdevs[i]);
        if (spa->spa_spares.sav_vdevs) {
                kmem_free(spa->spa_spares.sav_vdevs,
                    spa->spa_spares.sav_count * sizeof (void *));
                spa->spa_spares.sav_vdevs = NULL;
        }
        if (spa->spa_spares.sav_config) {
                nvlist_free(spa->spa_spares.sav_config);
                spa->spa_spares.sav_config = NULL;
        }
        spa->spa_spares.sav_count = 0;

        for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
                vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
                vdev_free(spa->spa_l2cache.sav_vdevs[i]);
        }
        if (spa->spa_l2cache.sav_vdevs) {
                kmem_free(spa->spa_l2cache.sav_vdevs,
                    spa->spa_l2cache.sav_count * sizeof (void *));
                spa->spa_l2cache.sav_vdevs = NULL;
        }
        if (spa->spa_l2cache.sav_config) {
                nvlist_free(spa->spa_l2cache.sav_config);
                spa->spa_l2cache.sav_config = NULL;
        }
        spa->spa_l2cache.sav_count = 0;

        spa->spa_async_suspended = 0;

        if (spa->spa_comment != NULL) {
                spa_strfree(spa->spa_comment);
                spa->spa_comment = NULL;
        }

        spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
        nvlist_t **spares;
        uint_t nspares;
        int i;
        vdev_t *vd, *tvd;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        /*
         * First, close and free any existing spare vdevs.
         */
        for (i = 0; i < spa->spa_spares.sav_count; i++) {
                vd = spa->spa_spares.sav_vdevs[i];

                /* Undo the call to spa_activate() below */
                if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
                    B_FALSE)) != NULL && tvd->vdev_isspare)
                        spa_spare_remove(tvd);
                vdev_close(vd);
                vdev_free(vd);
        }

        if (spa->spa_spares.sav_vdevs)
                kmem_free(spa->spa_spares.sav_vdevs,
                    spa->spa_spares.sav_count * sizeof (void *));

        if (spa->spa_spares.sav_config == NULL)
                nspares = 0;
        else
                VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
                    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

        spa->spa_spares.sav_count = (int)nspares;
        spa->spa_spares.sav_vdevs = NULL;

        if (nspares == 0)
                return;

        /*
         * Construct the array of vdevs, opening them to get status in the
         * process. For each spare, there are potentially two different vdev_t
         * structures associated with it: one in the list of spares (used only
         * for basic validation purposes) and one in the active vdev
         * configuration (if it's spared in). During this phase we open and
         * validate each vdev on the spare list. If the vdev also exists in the
         * active configuration, then we also mark this vdev as an active spare.
         */
        spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
            KM_PUSHPAGE);
        for (i = 0; i < spa->spa_spares.sav_count; i++) {
                VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
                    VDEV_ALLOC_SPARE) == 0);
                ASSERT(vd != NULL);

                spa->spa_spares.sav_vdevs[i] = vd;

                if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
                    B_FALSE)) != NULL) {
                        if (!tvd->vdev_isspare)
                                spa_spare_add(tvd);

                        /*
                         * We only mark the spare active if we were successfully
                         * able to load the vdev. Otherwise, importing a pool
                         * with a bad active spare would result in strange
                         * behavior, because multiple pools would think the
                         * spare is actively in use.
                         *
                         * There is a vulnerability here to an equally bizarre
                         * circumstance, where a dead active spare is later
                         * brought back to life (onlined or otherwise). Given
                         * the rarity of this scenario, and the extra complexity
                         * it adds, we ignore the possibility.
                         */
                        if (!vdev_is_dead(tvd))
                                spa_spare_activate(tvd);
                }

                vd->vdev_top = vd;
                vd->vdev_aux = &spa->spa_spares;

                if (vdev_open(vd) != 0)
                        continue;

                if (vdev_validate_aux(vd) == 0)
                        spa_spare_add(vd);
        }

        /*
         * Recompute the stashed list of spares, with status information
         * this time.
         */
        VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
            KM_PUSHPAGE);
        for (i = 0; i < spa->spa_spares.sav_count; i++)
                spares[i] = vdev_config_generate(spa,
                    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
        VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
            ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
        for (i = 0; i < spa->spa_spares.sav_count; i++)
                nvlist_free(spares[i]);
        kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
        nvlist_t **l2cache;
        uint_t nl2cache;
        int i, j, oldnvdevs;
        uint64_t guid;
        vdev_t *vd, **oldvdevs, **newvdevs = NULL;
        spa_aux_vdev_t *sav = &spa->spa_l2cache;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        if (sav->sav_config != NULL) {
                VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
                    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
                newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_PUSHPAGE);
        } else {
                nl2cache = 0;
        }

        oldvdevs = sav->sav_vdevs;
        oldnvdevs = sav->sav_count;
        sav->sav_vdevs = NULL;
        sav->sav_count = 0;

        /*
         * Process new nvlist of vdevs.
         */
        for (i = 0; i < nl2cache; i++) {
                VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
                    &guid) == 0);

                newvdevs[i] = NULL;
                for (j = 0; j < oldnvdevs; j++) {
                        vd = oldvdevs[j];
                        if (vd != NULL && guid == vd->vdev_guid) {
                                /*
                                 * Retain previous vdev for add/remove ops.
                                 */
                                newvdevs[i] = vd;
                                oldvdevs[j] = NULL;
                                break;
                        }
                }

                if (newvdevs[i] == NULL) {
                        /*
                         * Create new vdev
                         */
                        VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
                            VDEV_ALLOC_L2CACHE) == 0);
                        ASSERT(vd != NULL);
                        newvdevs[i] = vd;

                        /*
                         * Commit this vdev as an l2cache device,
                         * even if it fails to open.
                         */
                        spa_l2cache_add(vd);

                        vd->vdev_top = vd;
                        vd->vdev_aux = sav;

                        spa_l2cache_activate(vd);

                        if (vdev_open(vd) != 0)
                                continue;

                        (void) vdev_validate_aux(vd);

                        if (!vdev_is_dead(vd))
                                l2arc_add_vdev(spa, vd);
                }
        }

        /*
         * Purge vdevs that were dropped
         */
        for (i = 0; i < oldnvdevs; i++) {
                uint64_t pool;

                vd = oldvdevs[i];
                if (vd != NULL) {
                        ASSERT(vd->vdev_isl2cache);

                        if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
                            pool != 0ULL && l2arc_vdev_present(vd))
                                l2arc_remove_vdev(vd);
                        vdev_clear_stats(vd);
                        vdev_free(vd);
                }
        }

        if (oldvdevs)
                kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

        if (sav->sav_config == NULL)
                goto out;

        sav->sav_vdevs = newvdevs;
        sav->sav_count = (int)nl2cache;

        /*
         * Recompute the stashed list of l2cache devices, with status
         * information this time.
         */
        VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
        for (i = 0; i < sav->sav_count; i++)
                l2cache[i] = vdev_config_generate(spa,
                    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
        VERIFY(nvlist_add_nvlist_array(sav->sav_config,
            ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
        for (i = 0; i < sav->sav_count; i++)
                nvlist_free(l2cache[i]);
        if (sav->sav_count)
                kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
        dmu_buf_t *db;
        char *packed = NULL;
        size_t nvsize = 0;
        int error;
        *value = NULL;

        error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
        if (error)
                return (error);

        nvsize = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);

        packed = kmem_alloc(nvsize, KM_PUSHPAGE | KM_NODEBUG);
        error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
            DMU_READ_PREFETCH);
        if (error == 0)
                error = nvlist_unpack(packed, nvsize, value, 0);
        kmem_free(packed, nvsize);

        return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
        int c;

        for (c = 0; c < vd->vdev_children; c++)
                spa_check_removed(vd->vdev_child[c]);

        if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
                zfs_ereport_post(FM_EREPORT_RESOURCE_AUTOREPLACE,
                    vd->vdev_spa, vd, NULL, 0, 0);
                spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_CHECK);
        }
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
        vdev_t *mrvd, *rvd = spa->spa_root_vdev;
        nvlist_t *nv;
        int c, i;

        VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

        ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

        /*
         * If we're doing a normal import, then build up any additional
         * diagnostic information about missing devices in this config.
         * We'll pass this up to the user for further processing.
         */
        if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
                nvlist_t **child, *nv;
                uint64_t idx = 0;

                child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
                    KM_PUSHPAGE);
                VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);

                for (c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *tvd = rvd->vdev_child[c];
                        vdev_t *mtvd = mrvd->vdev_child[c];

                        if (tvd->vdev_ops == &vdev_missing_ops &&
                            mtvd->vdev_ops != &vdev_missing_ops &&
                            mtvd->vdev_islog)
                                child[idx++] = vdev_config_generate(spa, mtvd,
                                    B_FALSE, 0);
                }

                if (idx) {
                        VERIFY(nvlist_add_nvlist_array(nv,
                            ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
                        VERIFY(nvlist_add_nvlist(spa->spa_load_info,
                            ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

                        for (i = 0; i < idx; i++)
                                nvlist_free(child[i]);
                }
                nvlist_free(nv);
                kmem_free(child, rvd->vdev_children * sizeof (char **));
        }

        /*
         * Compare the root vdev tree with the information we have
         * from the MOS config (mrvd). Check each top-level vdev
         * with the corresponding MOS config top-level (mtvd).
         */
        for (c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                vdev_t *mtvd = mrvd->vdev_child[c];

                /*
                 * Resolve any "missing" vdevs in the current configuration.
                 * If we find that the MOS config has more accurate information
                 * about the top-level vdev then use that vdev instead.
                 */
                if (tvd->vdev_ops == &vdev_missing_ops &&
                    mtvd->vdev_ops != &vdev_missing_ops) {

                        if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
                                continue;

                        /*
                         * Device specific actions.
                         */
                        if (mtvd->vdev_islog) {
                                spa_set_log_state(spa, SPA_LOG_CLEAR);
                        } else {
                                /*
                                 * XXX - once we have 'readonly' pool
                                 * support we should be able to handle
                                 * missing data devices by transitioning
                                 * the pool to readonly.
                                 */
                                continue;
                        }

                        /*
                         * Swap the missing vdev with the data we were
                         * able to obtain from the MOS config.
                         */
                        vdev_remove_child(rvd, tvd);
                        vdev_remove_child(mrvd, mtvd);

                        vdev_add_child(rvd, mtvd);
                        vdev_add_child(mrvd, tvd);

                        spa_config_exit(spa, SCL_ALL, FTAG);
                        vdev_load(mtvd);
                        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

                        vdev_reopen(rvd);
                } else if (mtvd->vdev_islog) {
                        /*
                         * Load the slog device's state from the MOS config
                         * since it's possible that the label does not
                         * contain the most up-to-date information.
                         */
                        vdev_load_log_state(tvd, mtvd);
                        vdev_reopen(tvd);
                }
        }
        vdev_free(mrvd);
        spa_config_exit(spa, SCL_ALL, FTAG);

        /*
         * Ensure we were able to validate the config.
         */
        return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static int
spa_check_logs(spa_t *spa)
{
        switch (spa->spa_log_state) {
        default:
                break;
        case SPA_LOG_MISSING:
                /* need to recheck in case slog has been restored */
        case SPA_LOG_UNKNOWN:
                if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
                    DS_FIND_CHILDREN)) {
                        spa_set_log_state(spa, SPA_LOG_MISSING);
                        return (1);
                }
                break;
        }
        return (0);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
        vdev_t *rvd = spa->spa_root_vdev;
        boolean_t slog_found = B_FALSE;
        int c;

        ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

        if (!spa_has_slogs(spa))
                return (B_FALSE);

        for (c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;

                if (tvd->vdev_islog) {
                        metaslab_group_passivate(mg);
                        slog_found = B_TRUE;
                }
        }

        return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
        vdev_t *rvd = spa->spa_root_vdev;
        int c;

        ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

        for (c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;

                if (tvd->vdev_islog)
                        metaslab_group_activate(mg);
        }
}

int
spa_offline_log(spa_t *spa)
{
        int error = 0;

        if ((error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
            NULL, DS_FIND_CHILDREN)) == 0) {

                /*
                 * We successfully offlined the log device, sync out the
                 * current txg so that the "stubby" block can be removed
                 * by zil_sync().
                 */
                txg_wait_synced(spa->spa_dsl_pool, 0);
        }
        return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
        int i;

        for (i = 0; i < sav->sav_count; i++)
                spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        if (zio->io_error)
                return;

        mutex_enter(&spa->spa_props_lock);      /* any mutex will do */
        if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
                spa->spa_claim_max_txg = zio->io_bp->blk_birth;
        mutex_exit(&spa->spa_props_lock);
}
34dc7c2f 1833
428870ff
BB
1834typedef struct spa_load_error {
1835 uint64_t sle_meta_count;
1836 uint64_t sle_data_count;
1837} spa_load_error_t;
34dc7c2f 1838
428870ff
BB
1839static void
1840spa_load_verify_done(zio_t *zio)
1841{
1842 blkptr_t *bp = zio->io_bp;
1843 spa_load_error_t *sle = zio->io_private;
1844 dmu_object_type_t type = BP_GET_TYPE(bp);
1845 int error = zio->io_error;
34dc7c2f 1846
428870ff 1847 if (error) {
9ae529ec 1848 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
428870ff
BB
1849 type != DMU_OT_INTENT_LOG)
1850 atomic_add_64(&sle->sle_meta_count, 1);
1851 else
1852 atomic_add_64(&sle->sle_data_count, 1);
34dc7c2f 1853 }
428870ff
BB
1854 zio_data_buf_free(zio->io_data, zio->io_size);
1855}
34dc7c2f 1856
428870ff
BB
1857/*ARGSUSED*/
1858static int
1859spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
294f6806 1860 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
428870ff
BB
1861{
1862 if (bp != NULL) {
1863 zio_t *rio = arg;
1864 size_t size = BP_GET_PSIZE(bp);
1865 void *data = zio_data_buf_alloc(size);
34dc7c2f 1866
428870ff
BB
1867 zio_nowait(zio_read(rio, spa, bp, data, size,
1868 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
1869 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
1870 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
34dc7c2f 1871 }
1872 return (0);
1873}
34dc7c2f 1874
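/*
 * Walk the pool from spa_verify_min_txg onward and decide whether it
 * is acceptable to open at this txg: the load is considered verified
 * only if the metadata and data error counts stay within the rewind
 * policy's zrp_maxmeta and zrp_maxdata limits.
 */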
1875static int
1876spa_load_verify(spa_t *spa)
1877{
1878 zio_t *rio;
1879 spa_load_error_t sle = { 0 };
1880 zpool_rewind_policy_t policy;
1881 boolean_t verify_ok = B_FALSE;
1882 int error;
34dc7c2f 1883
428870ff 1884 zpool_get_rewind_policy(spa->spa_config, &policy);
34dc7c2f 1885
1886 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
1887 return (0);
34dc7c2f 1888
1889 rio = zio_root(spa, NULL, &sle,
1890 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
34dc7c2f 1891
1892 error = traverse_pool(spa, spa->spa_verify_min_txg,
1893 TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);
1894
1895 (void) zio_wait(rio);
1896
1897 spa->spa_load_meta_errors = sle.sle_meta_count;
1898 spa->spa_load_data_errors = sle.sle_data_count;
1899
1900 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
1901 sle.sle_data_count <= policy.zrp_maxdata) {
1902 int64_t loss = 0;
1903
1904 verify_ok = B_TRUE;
1905 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
1906 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
1907
1908 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
1909 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1910 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
1911 VERIFY(nvlist_add_int64(spa->spa_load_info,
1912 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
1913 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1914 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
1915 } else {
1916 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
1917 }
1918
1919 if (error) {
1920 if (error != ENXIO && error != EIO)
1921 error = EIO;
1922 return (error);
1923 }
1924
1925 return (verify_ok ? 0 : EIO);
1926}
1927
1928/*
1929 * Find a value in the pool props object.
1930 */
1931static void
1932spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
1933{
1934 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
1935 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
1936}
1937
1938/*
1939 * Find a value in the pool directory object.
1940 */
1941static int
1942spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
1943{
1944 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1945 name, sizeof (uint64_t), 1, val));
1946}
1947
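/*
 * Mark the given vdev unopenable for the stated aux reason and hand
 * the error back; a small helper used throughout spa_load_impl() below.
 */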
1948static int
1949spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
1950{
1951 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
1952 return (err);
1953}
1954
1955/*
1956 * Fix up config after a partly-completed split. This is done with the
1957 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
1958 * pool have that entry in their config, but only the splitting one contains
1959 * a list of all the guids of the vdevs that are being split off.
1960 *
1961 * This function determines what to do with that list: either rejoin
1962 * all the disks to the pool, or complete the splitting process. To attempt
1963 * the rejoin, each disk that is offlined is marked online again, and
1964 * we do a reopen() call. If the vdev label for every disk that was
1965 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
1966 * then we call vdev_split() on each disk, and complete the split.
1967 *
1968 * Otherwise we leave the config alone, with all the vdevs in place in
1969 * the original pool.
1970 */
1971static void
1972spa_try_repair(spa_t *spa, nvlist_t *config)
1973{
1974 uint_t extracted;
1975 uint64_t *glist;
1976 uint_t i, gcount;
1977 nvlist_t *nvl;
1978 vdev_t **vd;
1979 boolean_t attempt_reopen;
1980
1981 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
1982 return;
1983
1984 /* check that the config is complete */
1985 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
1986 &glist, &gcount) != 0)
1987 return;
1988
b8d06fca 1989 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_PUSHPAGE);
1990
1991 /* attempt to online all the vdevs & validate */
1992 attempt_reopen = B_TRUE;
1993 for (i = 0; i < gcount; i++) {
1994 if (glist[i] == 0) /* vdev is hole */
1995 continue;
1996
1997 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
1998 if (vd[i] == NULL) {
1999 /*
2000 * Don't bother attempting to reopen the disks;
2001 * just do the split.
2002 */
2003 attempt_reopen = B_FALSE;
2004 } else {
2005 /* attempt to re-online it */
2006 vd[i]->vdev_offline = B_FALSE;
2007 }
2008 }
2009
2010 if (attempt_reopen) {
2011 vdev_reopen(spa->spa_root_vdev);
2012
2013 /* check each device to see what state it's in */
2014 for (extracted = 0, i = 0; i < gcount; i++) {
2015 if (vd[i] != NULL &&
2016 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2017 break;
2018 ++extracted;
2019 }
2020 }
2021
2022 /*
2023 * If every disk has been moved to the new pool, or if we never
2024 * even attempted to look at them, then we split them off for
2025 * good.
2026 */
2027 if (!attempt_reopen || gcount == extracted) {
2028 for (i = 0; i < gcount; i++)
2029 if (vd[i] != NULL)
2030 vdev_split(vd[i]);
2031 vdev_reopen(spa->spa_root_vdev);
2032 }
2033
2034 kmem_free(vd, gcount * sizeof (vdev_t *));
2035}
2036
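/*
 * Wrapper around spa_load_impl(): pull the pool guid, comment and
 * on-disk version out of the config, refuse to import a guid that
 * already exists, then do the real work. On failure (other than
 * EBADF) an ereport is posted, and the load timestamp is cleared
 * unless the error was EEXIST.
 */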
2037static int
2038spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2039 boolean_t mosconfig)
2040{
2041 nvlist_t *config = spa->spa_config;
2042 char *ereport = FM_EREPORT_ZFS_POOL;
d96eb2b1 2043 char *comment;
2044 int error;
2045 uint64_t pool_guid;
2046 nvlist_t *nvl;
2047
2048 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2049 return (EINVAL);
2050
2051 ASSERT(spa->spa_comment == NULL);
2052 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2053 spa->spa_comment = spa_strdup(comment);
2054
2055 /*
2056 * Versioning wasn't explicitly added to the label until later, so if
2057 * it's not present treat it as the initial version.
2058 */
2059 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2060 &spa->spa_ubsync.ub_version) != 0)
2061 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2062
2063 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2064 &spa->spa_config_txg);
2065
2066 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2067 spa_guid_exists(pool_guid, 0)) {
2068 error = EEXIST;
2069 } else {
3541dc6d 2070 spa->spa_config_guid = pool_guid;
2071
2072 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2073 &nvl) == 0) {
2074 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
b8d06fca 2075 KM_PUSHPAGE) == 0);
2076 }
2077
2078 nvlist_free(spa->spa_load_info);
2079 spa->spa_load_info = fnvlist_alloc();
2080
572e2857 2081 gethrestime(&spa->spa_loaded_ts);
2082 error = spa_load_impl(spa, pool_guid, config, state, type,
2083 mosconfig, &ereport);
2084 }
2085
2086 spa->spa_minref = refcount_count(&spa->spa_refcount);
2087 if (error) {
2088 if (error != EEXIST) {
2089 spa->spa_loaded_ts.tv_sec = 0;
2090 spa->spa_loaded_ts.tv_nsec = 0;
2091 }
2092 if (error != EBADF) {
2093 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2094 }
2095 }
2096 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2097 spa->spa_ena = 0;
2098
2099 return (error);
2100}
2101
2102/*
2103 * Load an existing storage pool, using the pool's builtin spa_config as a
2104 * source of configuration information.
2105 */
2106__attribute__((always_inline))
2107static inline int
2108spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2109 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2110 char **ereport)
2111{
2112 int error = 0;
2113 nvlist_t *nvroot = NULL;
9ae529ec 2114 nvlist_t *label;
2115 vdev_t *rvd;
2116 uberblock_t *ub = &spa->spa_uberblock;
572e2857 2117 uint64_t children, config_cache_txg = spa->spa_config_txg;
2118 int orig_mode = spa->spa_mode;
2119 int parse;
2120 uint64_t obj;
9ae529ec 2121 boolean_t missing_feat_write = B_FALSE;
2122
2123 /*
2124 * If this is an untrusted config, access the pool in read-only mode.
2125 * This prevents things like resilvering recently removed devices.
2126 */
2127 if (!mosconfig)
2128 spa->spa_mode = FREAD;
2129
2130 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2131
2132 spa->spa_load_state = state;
2133
2134 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2135 return (EINVAL);
2136
2137 parse = (type == SPA_IMPORT_EXISTING ?
2138 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2139
2140 /*
2141 * Create "The Godfather" zio to hold all async IOs
2142 */
2143 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2144 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2145
2146 /*
2147 * Parse the configuration into a vdev tree. We explicitly set the
2148 * value that will be returned by spa_version() since parsing the
2149 * configuration requires knowing the version number.
2150 */
2151 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2152 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2153 spa_config_exit(spa, SCL_ALL, FTAG);
2154
2155 if (error != 0)
2156 return (error);
2157
2158 ASSERT(spa->spa_root_vdev == rvd);
2159
2160 if (type != SPA_IMPORT_ASSEMBLE) {
2161 ASSERT(spa_guid(spa) == pool_guid);
2162 }
2163
2164 /*
2165 * Try to open all vdevs, loading each label in the process.
2166 */
2167 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2168 error = vdev_open(rvd);
2169 spa_config_exit(spa, SCL_ALL, FTAG);
2170 if (error != 0)
2171 return (error);
2172
2173 /*
2174 * We need to validate the vdev labels against the configuration that
2175 * we have in hand, which is dependent on the setting of mosconfig. If
2176 * mosconfig is true then we're validating the vdev labels based on
2177 * that config. Otherwise, we're validating against the cached config
2178 * (zpool.cache) that was read when we loaded the zfs module, and then
2179 * later we will recursively call spa_load() and validate against
2180 * the vdev config.
2181 *
2182 * If we're assembling a new pool that's been split off from an
2183 * existing pool, the labels haven't yet been updated so we skip
2184 * validation for now.
2185 */
2186 if (type != SPA_IMPORT_ASSEMBLE) {
2187 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
c7f2d69d 2188 error = vdev_validate(rvd, mosconfig);
2189 spa_config_exit(spa, SCL_ALL, FTAG);
2190
2191 if (error != 0)
2192 return (error);
2193
2194 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2195 return (ENXIO);
2196 }
2197
2198 /*
2199 * Find the best uberblock.
2200 */
9ae529ec 2201 vdev_uberblock_load(rvd, ub, &label);
2202
2203 /*
2204 * If we weren't able to find a single valid uberblock, return failure.
2205 */
2206 if (ub->ub_txg == 0) {
2207 nvlist_free(label);
428870ff 2208 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
9ae529ec 2209 }
2210
2211 /*
9ae529ec 2212 * If the pool has an unsupported version we can't open it.
428870ff 2213 */
2214 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2215 nvlist_free(label);
428870ff 2216 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2217 }
2218
2219 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2220 nvlist_t *features;
2221
2222 /*
2223 * If we weren't able to find what's necessary for reading the
2224 * MOS in the label, return failure.
2225 */
2226 if (label == NULL || nvlist_lookup_nvlist(label,
2227 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2228 nvlist_free(label);
2229 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2230 ENXIO));
2231 }
2232
2233 /*
2234 * Update our in-core representation with the definitive values
2235 * from the label.
2236 */
2237 nvlist_free(spa->spa_label_features);
2238 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2239 }
2240
2241 nvlist_free(label);
2242
2243 /*
2244 * Look through entries in the label nvlist's features_for_read. If
2245 * there is a feature listed there which we don't understand then we
2246 * cannot open a pool.
2247 */
2248 if (ub->ub_version >= SPA_VERSION_FEATURES) {
2249 nvlist_t *unsup_feat;
2250 nvpair_t *nvp;
2251
2252 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2253 0);
2254
2255 for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
2256 nvp != NULL;
2257 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2258 if (!zfeature_is_supported(nvpair_name(nvp))) {
2259 VERIFY(nvlist_add_string(unsup_feat,
2260 nvpair_name(nvp), "") == 0);
2261 }
2262 }
2263
2264 if (!nvlist_empty(unsup_feat)) {
2265 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2266 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2267 nvlist_free(unsup_feat);
2268 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2269 ENOTSUP));
2270 }
2271
2272 nvlist_free(unsup_feat);
2273 }
2274
2275 /*
2276 * If the vdev guid sum doesn't match the uberblock, we have an
2277 * incomplete configuration. We first check to see if the pool
2278 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2279 * If it is, defer the vdev_guid_sum check until later so we
2280 * can handle missing vdevs.
428870ff 2281 */
2282 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2283 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2284 rvd->vdev_guid_sum != ub->ub_guid_sum)
2285 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2286
2287 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2288 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2289 spa_try_repair(spa, config);
2290 spa_config_exit(spa, SCL_ALL, FTAG);
2291 nvlist_free(spa->spa_config_splitting);
2292 spa->spa_config_splitting = NULL;
2293 }
2294
2295 /*
2296 * Initialize internal SPA structures.
2297 */
2298 spa->spa_state = POOL_STATE_ACTIVE;
2299 spa->spa_ubsync = spa->spa_uberblock;
2300 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2301 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2302 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2303 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2304 spa->spa_claim_max_txg = spa->spa_first_txg;
2305 spa->spa_prev_software_version = ub->ub_software_version;
2306
9ae529ec 2307 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2308 if (error)
2309 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2310 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2311
2312 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2313 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2314
2315 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2316 boolean_t missing_feat_read = B_FALSE;
b9b24bb4 2317 nvlist_t *unsup_feat, *enabled_feat;
2318
2319 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2320 &spa->spa_feat_for_read_obj) != 0) {
2321 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2322 }
2323
2324 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2325 &spa->spa_feat_for_write_obj) != 0) {
2326 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2327 }
2328
2329 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2330 &spa->spa_feat_desc_obj) != 0) {
2331 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2332 }
2333
2334 enabled_feat = fnvlist_alloc();
2335 unsup_feat = fnvlist_alloc();
2336
2337 if (!feature_is_supported(spa->spa_meta_objset,
2338 spa->spa_feat_for_read_obj, spa->spa_feat_desc_obj,
b9b24bb4 2339 unsup_feat, enabled_feat))
2340 missing_feat_read = B_TRUE;
2341
2342 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2343 if (!feature_is_supported(spa->spa_meta_objset,
2344 spa->spa_feat_for_write_obj, spa->spa_feat_desc_obj,
b9b24bb4 2345 unsup_feat, enabled_feat)) {
9ae529ec 2346 missing_feat_write = B_TRUE;
b9b24bb4 2347 }
2348 }
2349
2350 fnvlist_add_nvlist(spa->spa_load_info,
2351 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2352
9ae529ec 2353 if (!nvlist_empty(unsup_feat)) {
2354 fnvlist_add_nvlist(spa->spa_load_info,
2355 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2356 }
2357
2358 fnvlist_free(enabled_feat);
2359 fnvlist_free(unsup_feat);
2360
2361 if (!missing_feat_read) {
2362 fnvlist_add_boolean(spa->spa_load_info,
2363 ZPOOL_CONFIG_CAN_RDONLY);
2364 }
2365
2366 /*
2367 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2368 * twofold: to determine whether the pool is available for
2369 * import in read-write mode and (if it is not) whether the
2370 * pool is available for import in read-only mode. If the pool
2371 * is available for import in read-write mode, it is displayed
2372 * as available in userland; if it is not available for import
2373 * in read-only mode, it is displayed as unavailable in
2374 * userland. If the pool is available for import in read-only
2375 * mode but not read-write mode, it is displayed as unavailable
2376 * in userland with a special note that the pool is actually
2377 * available for open in read-only mode.
2378 *
2379 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2380 * missing a feature for write, we must first determine whether
2381 * the pool can be opened read-only before returning to
2382 * userland in order to know whether to display the
2383 * abovementioned note.
2384 */
2385 if (missing_feat_read || (missing_feat_write &&
2386 spa_writeable(spa))) {
2387 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2388 ENOTSUP));
2389 }
2390 }
2391
2392 spa->spa_is_initializing = B_TRUE;
2393 error = dsl_pool_open(spa->spa_dsl_pool);
2394 spa->spa_is_initializing = B_FALSE;
2395 if (error != 0)
2396 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2397
2398 if (!mosconfig) {
2399 uint64_t hostid;
2400 nvlist_t *policy = NULL, *nvconfig;
2401
2402 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2403 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2404
2405 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
b128c09f 2406 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2407 char *hostname;
2408 unsigned long myhostid = 0;
2409
428870ff 2410 VERIFY(nvlist_lookup_string(nvconfig,
2411 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2412
2413#ifdef _KERNEL
2414 myhostid = zone_get_hostid(NULL);
2415#else /* _KERNEL */
2416 /*
2417 * We're emulating the system's hostid in userland, so
2418 * we can't use zone_get_hostid().
2419 */
34dc7c2f 2420 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
d164b209 2421#endif /* _KERNEL */
34dc7c2f 2422 if (hostid != 0 && myhostid != 0 &&
d164b209 2423 hostid != myhostid) {
428870ff 2424 nvlist_free(nvconfig);
2425 cmn_err(CE_WARN, "pool '%s' could not be "
2426 "loaded as it was last accessed by "
b128c09f 2427 "another system (host: %s hostid: 0x%lx). "
3cee2262 2428 "See: http://zfsonlinux.org/msg/ZFS-8000-EY",
b128c09f 2429 spa_name(spa), hostname,
34dc7c2f 2430 (unsigned long)hostid);
428870ff 2431 return (EBADF);
2432 }
2433 }
2434 if (nvlist_lookup_nvlist(spa->spa_config,
2435 ZPOOL_REWIND_POLICY, &policy) == 0)
2436 VERIFY(nvlist_add_nvlist(nvconfig,
2437 ZPOOL_REWIND_POLICY, policy) == 0);
34dc7c2f 2438
428870ff 2439 spa_config_set(spa, nvconfig);
2440 spa_unload(spa);
2441 spa_deactivate(spa);
fb5f0bc8 2442 spa_activate(spa, orig_mode);
34dc7c2f 2443
428870ff 2444 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
34dc7c2f
BB
2445 }
2446
2447 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2448 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2449 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2450 if (error != 0)
2451 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2452
2453 /*
2454 * Load the bit that tells us to use the new accounting function
2455 * (raid-z deflation). If we have an older pool, this will not
2456 * be present.
2457 */
2458 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2459 if (error != 0 && error != ENOENT)
2460 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2461
2462 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2463 &spa->spa_creation_version);
2464 if (error != 0 && error != ENOENT)
2465 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2466
2467 /*
2468 * Load the persistent error log. If we have an older pool, this will
2469 * not be present.
2470 */
2471 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2472 if (error != 0 && error != ENOENT)
2473 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2474
2475 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2476 &spa->spa_errlog_scrub);
2477 if (error != 0 && error != ENOENT)
2478 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2479
2480 /*
2481 * Load the history object. If we have an older pool, this
2482 * will not be present.
2483 */
2484 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2485 if (error != 0 && error != ENOENT)
2486 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2487
2488 /*
2489 * If we're assembling the pool from the split-off vdevs of
2490 * an existing pool, we don't want to attach the spares & cache
2491 * devices.
2492 */
2493
2494 /*
2495 * Load any hot spares for this pool.
2496 */
2497 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2498 if (error != 0 && error != ENOENT)
2499 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2500 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2501 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2502 if (load_nvlist(spa, spa->spa_spares.sav_object,
2503 &spa->spa_spares.sav_config) != 0)
2504 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2505
b128c09f 2506 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 2507 spa_load_spares(spa);
b128c09f 2508 spa_config_exit(spa, SCL_ALL, FTAG);
2509 } else if (error == 0) {
2510 spa->spa_spares.sav_sync = B_TRUE;
2511 }
2512
2513 /*
2514 * Load any level 2 ARC devices for this pool.
2515 */
428870ff 2516 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
34dc7c2f 2517 &spa->spa_l2cache.sav_object);
2518 if (error != 0 && error != ENOENT)
2519 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2520 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2521 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2522 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2523 &spa->spa_l2cache.sav_config) != 0)
2524 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
34dc7c2f 2525
b128c09f 2526 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 2527 spa_load_l2cache(spa);
b128c09f 2528 spa_config_exit(spa, SCL_ALL, FTAG);
2529 } else if (error == 0) {
2530 spa->spa_l2cache.sav_sync = B_TRUE;
2531 }
2532
2533 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2534
2535 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2536 if (error && error != ENOENT)
2537 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2538
2539 if (error == 0) {
2540 uint64_t autoreplace;
2541
2542 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2543 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2544 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2545 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2546 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2547 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2548 &spa->spa_dedup_ditto);
2549
2550 spa->spa_autoreplace = (autoreplace != 0);
2551 }
2552
2553 /*
2554 * If the 'autoreplace' property is set, then post a resource notifying
2555 * the ZFS DE that it should not issue any faults for unopenable
2556 * devices. We also iterate over the vdevs, and post a sysevent for any
2557 * unopenable vdevs so that the normal autoreplace handler can take
2558 * over.
2559 */
428870ff 2560 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
34dc7c2f 2561 spa_check_removed(spa->spa_root_vdev);
2562 /*
2563 * For the import case, this is done in spa_import(), because
2564 * at this point we're using the spare definitions from
2565 * the MOS config, not necessarily from the userland config.
2566 */
2567 if (state != SPA_LOAD_IMPORT) {
2568 spa_aux_check_removed(&spa->spa_spares);
2569 spa_aux_check_removed(&spa->spa_l2cache);
2570 }
2571 }
2572
2573 /*
2574 * Load the vdev state for all toplevel vdevs.
2575 */
2576 vdev_load(rvd);
2577
2578 /*
2579 * Propagate the leaf DTLs we just loaded all the way up the tree.
2580 */
b128c09f 2581 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 2582 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
b128c09f 2583 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 2584
2585 /*
2586 * Load the DDTs (dedup tables).
2587 */
2588 error = ddt_load(spa);
2589 if (error != 0)
2590 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2591
2592 spa_update_dspace(spa);
2593
428870ff 2594 /*
2595 * Validate the config, using the MOS config to fill in any
2596 * information which might be missing. If we fail to validate
2597 * the config then declare the pool unfit for use. If we're
2598 * assembling a pool from a split, the log is not transferred
2599 * over.
2600 */
2601 if (type != SPA_IMPORT_ASSEMBLE) {
2602 nvlist_t *nvconfig;
2603
2604 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2605 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2606
2607 if (!spa_config_valid(spa, nvconfig)) {
2608 nvlist_free(nvconfig);
2609 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2610 ENXIO));
2611 }
2612 nvlist_free(nvconfig);
2613
572e2857 2614 /*
9ae529ec 2615 * Now that we've validated the config, check the state of the
2616 * root vdev. If it can't be opened, it indicates one or
2617 * more toplevel vdevs are faulted.
2618 */
2619 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2620 return (ENXIO);
2621
2622 if (spa_check_logs(spa)) {
2623 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2624 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2625 }
2626 }
2627
2628 if (missing_feat_write) {
2629 ASSERT(state == SPA_LOAD_TRYIMPORT);
2630
2631 /*
2632 * At this point, we know that we can open the pool in
2633 * read-only mode but not read-write mode. We now have enough
2634 * information and can return to userland.
2635 */
2636 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2637 }
2638
2639 /*
2640 * We've successfully opened the pool, verify that we're ready
2641 * to start pushing transactions.
2642 */
2643 if (state != SPA_LOAD_TRYIMPORT) {
c65aa5b2 2644 if ((error = spa_load_verify(spa)))
2645 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2646 error));
2647 }
2648
2649 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2650 spa->spa_load_max_txg == UINT64_MAX)) {
2651 dmu_tx_t *tx;
2652 int need_update = B_FALSE;
d6320ddb 2653 int c;
2654
2655 ASSERT(state != SPA_LOAD_TRYIMPORT);
2656
2657 /*
2658 * Claim log blocks that haven't been committed yet.
2659 * This must all happen in a single txg.
2660 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2661 * invoked from zil_claim_log_block()'s i/o done callback.
2662 * Price of rollback is that we abandon the log.
34dc7c2f 2663 */
2664 spa->spa_claiming = B_TRUE;
2665
2666 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2667 spa_first_txg(spa));
b128c09f 2668 (void) dmu_objset_find(spa_name(spa),
2669 zil_claim, tx, DS_FIND_CHILDREN);
2670 dmu_tx_commit(tx);
2671
2672 spa->spa_claiming = B_FALSE;
2673
2674 spa_set_log_state(spa, SPA_LOG_GOOD);
2675 spa->spa_sync_on = B_TRUE;
2676 txg_sync_start(spa->spa_dsl_pool);
2677
2678 /*
428870ff
BB
2679 * Wait for all claims to sync. We sync up to the highest
2680 * claimed log block birth time so that claimed log blocks
2681 * don't appear to be from the future. spa_claim_max_txg
2682 * will have been set for us by either zil_check_log_chain()
2683 * (invoked from spa_check_logs()) or zil_claim() above.
34dc7c2f 2684 */
428870ff 2685 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2686
2687 /*
2688 * If the config cache is stale, or we have uninitialized
2689 * metaslabs (see spa_vdev_add()), then update the config.
45d1cae3 2690 *
572e2857 2691 * If this is a verbatim import, trust the current
45d1cae3 2692 * in-core spa_config and update the disk labels.
2693 */
2694 if (config_cache_txg != spa->spa_config_txg ||
2695 state == SPA_LOAD_IMPORT ||
2696 state == SPA_LOAD_RECOVER ||
2697 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2698 need_update = B_TRUE;
2699
d6320ddb 2700 for (c = 0; c < rvd->vdev_children; c++)
2701 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2702 need_update = B_TRUE;
2703
2704 /*
2705 * Update the config cache asynchronously in case we're the
2706 * root pool, in which case the config cache isn't writable yet.
2707 */
2708 if (need_update)
2709 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2710
2711 /*
2712 * Check all DTLs to see if anything needs resilvering.
2713 */
2714 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2715 vdev_resilver_needed(rvd, NULL, NULL))
fb5f0bc8 2716 spa_async_request(spa, SPA_ASYNC_RESILVER);
428870ff 2717
2718 /*
2719 * Log the fact that we booted up (so that we can detect if
2720 * we rebooted in the middle of an operation).
2721 */
2722 spa_history_log_version(spa, "open");
2723
2724 /*
2725 * Delete any inconsistent datasets.
2726 */
2727 (void) dmu_objset_find(spa_name(spa),
2728 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2729
2730 /*
2731 * Clean up any stale temporary dataset userrefs.
2732 */
2733 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
2734 }
2735
2736 return (0);
2737}
34dc7c2f 2738
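/*
 * Tear the pool down, back spa_load_max_txg off by one txg and try
 * the load again; spa_load_best() below drives this while rewinding.
 */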
2739static int
2740spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
2741{
2742 int mode = spa->spa_mode;
2743
2744 spa_unload(spa);
2745 spa_deactivate(spa);
2746
2747 spa->spa_load_max_txg--;
2748
572e2857 2749 spa_activate(spa, mode);
2750 spa_async_suspend(spa);
2751
2752 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2753}
2754
2755/*
2756 * If spa_load() fails this function will try loading prior txg's. If
2757 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
2758 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
2759 * function will not rewind the pool and will return the same error as
2760 * spa_load().
2761 */
2762static int
2763spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2764 uint64_t max_request, int rewind_flags)
2765{
9ae529ec 2766 nvlist_t *loadinfo = NULL;
2767 nvlist_t *config = NULL;
2768 int load_error, rewind_error;
2769 uint64_t safe_rewind_txg;
2770 uint64_t min_txg;
2771
2772 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2773 spa->spa_load_max_txg = spa->spa_load_txg;
2774 spa_set_log_state(spa, SPA_LOG_CLEAR);
2775 } else {
2776 spa->spa_load_max_txg = max_request;
2777 }
2778
2779 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
2780 mosconfig);
2781 if (load_error == 0)
2782 return (0);
2783
2784 if (spa->spa_root_vdev != NULL)
2785 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2786
2787 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2788 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2789
2790 if (rewind_flags & ZPOOL_NEVER_REWIND) {
2791 nvlist_free(config);
2792 return (load_error);
2793 }
2794
2795 if (state == SPA_LOAD_RECOVER) {
2796 /* Price of rolling back is discarding txgs, including log */
428870ff 2797 spa_set_log_state(spa, SPA_LOG_CLEAR);
2798 } else {
2799 /*
2800 * If we aren't rolling back save the load info from our first
2801 * import attempt so that we can restore it after attempting
2802 * to rewind.
2803 */
2804 loadinfo = spa->spa_load_info;
2805 spa->spa_load_info = fnvlist_alloc();
2806 }
2807
2808 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2809 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2810 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2811 TXG_INITIAL : safe_rewind_txg;
2812
2813 /*
2814 * Continue as long as we're finding errors, we're still within
2815 * the acceptable rewind range, and we're still finding uberblocks
2816 */
2817 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
2818 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
2819 if (spa->spa_load_max_txg < safe_rewind_txg)
2820 spa->spa_extreme_rewind = B_TRUE;
2821 rewind_error = spa_load_retry(spa, state, mosconfig);
2822 }
2823
2824 spa->spa_extreme_rewind = B_FALSE;
2825 spa->spa_load_max_txg = UINT64_MAX;
2826
2827 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2828 spa_config_set(spa, config);
2829
2830 if (state == SPA_LOAD_RECOVER) {
2831 ASSERT3P(loadinfo, ==, NULL);
2832 return (rewind_error);
2833 } else {
2834 /* Store the rewind info as part of the initial load info */
2835 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
2836 spa->spa_load_info);
2837
2838 /* Restore the initial load info */
2839 fnvlist_free(spa->spa_load_info);
2840 spa->spa_load_info = loadinfo;
2841
2842 return (load_error);
2843 }
2844}
2845
2846/*
2847 * Pool Open/Import
2848 *
2849 * The import case is identical to an open except that the configuration is sent
2850 * down from userland, instead of grabbed from the configuration cache. For the
2851 * case of an open, the pool configuration will exist in the
2852 * POOL_STATE_UNINITIALIZED state.
2853 *
2854 * The stats information (gen/count/ustats) is used to gather vdev statistics at
2855 * the same time open the pool, without having to keep around the spa_t in some
2856 * ambiguous state.
2857 */
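/*
 * Typical consumer pattern (a sketch only, not lifted from any
 * particular caller; FTAG is the usual reference tag in this file):
 *
 *	spa_t *spa;
 *
 *	if (spa_open("tank", &spa, FTAG) == 0) {
 *		... use the pool ...
 *		spa_close(spa, FTAG);
 *	}
 */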
2858static int
2859spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2860 nvlist_t **config)
2861{
2862 spa_t *spa;
572e2857 2863 spa_load_state_t state = SPA_LOAD_OPEN;
34dc7c2f 2864 int error;
34dc7c2f 2865 int locked = B_FALSE;
526af785 2866 int firstopen = B_FALSE;
2867
2868 *spapp = NULL;
2869
2870 /*
2871 * As disgusting as this is, we need to support recursive calls to this
2872 * function because dsl_dir_open() is called during spa_load(), and ends
2873 * up calling spa_open() again. The real fix is to figure out how to
2874 * avoid dsl_dir_open() calling this in the first place.
2875 */
2876 if (mutex_owner(&spa_namespace_lock) != curthread) {
2877 mutex_enter(&spa_namespace_lock);
2878 locked = B_TRUE;
2879 }
2880
2881 if ((spa = spa_lookup(pool)) == NULL) {
2882 if (locked)
2883 mutex_exit(&spa_namespace_lock);
2884 return (ENOENT);
2885 }
428870ff 2886
34dc7c2f 2887 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2888 zpool_rewind_policy_t policy;
2889
2890 firstopen = B_TRUE;
2891
2892 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2893 &policy);
2894 if (policy.zrp_request & ZPOOL_DO_REWIND)
2895 state = SPA_LOAD_RECOVER;
34dc7c2f 2896
fb5f0bc8 2897 spa_activate(spa, spa_mode_global);
34dc7c2f 2898
2899 if (state != SPA_LOAD_RECOVER)
2900 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2901
2902 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
2903 policy.zrp_request);
2904
2905 if (error == EBADF) {
2906 /*
2907 * If vdev_validate() returns failure (indicated by
2908 * EBADF), it indicates that one of the vdevs indicates
2909 * that the pool has been exported or destroyed. If
2910 * this is the case, the config cache is out of sync and
2911 * we should remove the pool from the namespace.
2912 */
2913 spa_unload(spa);
2914 spa_deactivate(spa);
b128c09f 2915 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 2916 spa_remove(spa);
2917 if (locked)
2918 mutex_exit(&spa_namespace_lock);
2919 return (ENOENT);
2920 }
2921
2922 if (error) {
2923 /*
2924 * We can't open the pool, but we still have useful
2925 * information: the state of each vdev after the
2926 * attempted vdev_open(). Return this to the user.
2927 */
572e2857 2928 if (config != NULL && spa->spa_config) {
428870ff 2929 VERIFY(nvlist_dup(spa->spa_config, config,
b8d06fca 2930 KM_PUSHPAGE) == 0);
2931 VERIFY(nvlist_add_nvlist(*config,
2932 ZPOOL_CONFIG_LOAD_INFO,
2933 spa->spa_load_info) == 0);
2934 }
2935 spa_unload(spa);
2936 spa_deactivate(spa);
428870ff 2937 spa->spa_last_open_failed = error;
2938 if (locked)
2939 mutex_exit(&spa_namespace_lock);
2940 *spapp = NULL;
2941 return (error);
34dc7c2f 2942 }
2943 }
2944
2945 spa_open_ref(spa, tag);
2946
b128c09f 2947 if (config != NULL)
34dc7c2f 2948 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
34dc7c2f 2949
2950 /*
2951 * If we've recovered the pool, pass back any information we
2952 * gathered while doing the load.
2953 */
2954 if (state == SPA_LOAD_RECOVER) {
2955 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
2956 spa->spa_load_info) == 0);
2957 }
2958
2959 if (locked) {
2960 spa->spa_last_open_failed = 0;
2961 spa->spa_last_ubsync_txg = 0;
2962 spa->spa_load_txg = 0;
2963 mutex_exit(&spa_namespace_lock);
2964 }
2965
2966#ifdef _KERNEL
2967 if (firstopen)
2968 zvol_create_minors(spa->spa_name);
2969#endif
2970
2971 *spapp = spa;
2972
2973 return (0);
2974}
2975
2976int
2977spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
2978 nvlist_t **config)
2979{
2980 return (spa_open_common(name, spapp, tag, policy, config));
2981}
2982
2983int
2984spa_open(const char *name, spa_t **spapp, void *tag)
2985{
428870ff 2986 return (spa_open_common(name, spapp, tag, NULL, NULL));
2987}
2988
2989/*
2990 * Lookup the given spa_t, incrementing the inject count in the process,
2991 * preventing it from being exported or destroyed.
2992 */
2993spa_t *
2994spa_inject_addref(char *name)
2995{
2996 spa_t *spa;
2997
2998 mutex_enter(&spa_namespace_lock);
2999 if ((spa = spa_lookup(name)) == NULL) {
3000 mutex_exit(&spa_namespace_lock);
3001 return (NULL);
3002 }
3003 spa->spa_inject_ref++;
3004 mutex_exit(&spa_namespace_lock);
3005
3006 return (spa);
3007}
3008
3009void
3010spa_inject_delref(spa_t *spa)
3011{
3012 mutex_enter(&spa_namespace_lock);
3013 spa->spa_inject_ref--;
3014 mutex_exit(&spa_namespace_lock);
3015}
3016
3017/*
3018 * Add spares device information to the nvlist.
3019 */
3020static void
3021spa_add_spares(spa_t *spa, nvlist_t *config)
3022{
3023 nvlist_t **spares;
3024 uint_t i, nspares;
3025 nvlist_t *nvroot;
3026 uint64_t guid;
3027 vdev_stat_t *vs;
3028 uint_t vsc;
3029 uint64_t pool;
3030
3031 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3032
3033 if (spa->spa_spares.sav_count == 0)
3034 return;
3035
3036 VERIFY(nvlist_lookup_nvlist(config,
3037 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3038 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3039 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3040 if (nspares != 0) {
3041 VERIFY(nvlist_add_nvlist_array(nvroot,
3042 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3043 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3044 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3045
3046 /*
3047 * Go through and find any spares which have since been
3048 * repurposed as an active spare. If this is the case, update
3049 * their status appropriately.
3050 */
3051 for (i = 0; i < nspares; i++) {
3052 VERIFY(nvlist_lookup_uint64(spares[i],
3053 ZPOOL_CONFIG_GUID, &guid) == 0);
3054 if (spa_spare_exists(guid, &pool, NULL) &&
3055 pool != 0ULL) {
34dc7c2f 3056 VERIFY(nvlist_lookup_uint64_array(
428870ff 3057 spares[i], ZPOOL_CONFIG_VDEV_STATS,
3058 (uint64_t **)&vs, &vsc) == 0);
3059 vs->vs_state = VDEV_STATE_CANT_OPEN;
3060 vs->vs_aux = VDEV_AUX_SPARED;
3061 }
3062 }
3063 }
3064}
3065
3066/*
3067 * Add l2cache device information to the nvlist, including vdev stats.
3068 */
3069static void
3070spa_add_l2cache(spa_t *spa, nvlist_t *config)
3071{
3072 nvlist_t **l2cache;
3073 uint_t i, j, nl2cache;
3074 nvlist_t *nvroot;
3075 uint64_t guid;
3076 vdev_t *vd;
3077 vdev_stat_t *vs;
3078 uint_t vsc;
3079
3080 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3081
3082 if (spa->spa_l2cache.sav_count == 0)
3083 return;
3084
3085 VERIFY(nvlist_lookup_nvlist(config,
3086 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3087 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3088 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3089 if (nl2cache != 0) {
3090 VERIFY(nvlist_add_nvlist_array(nvroot,
3091 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3092 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3093 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3094
3095 /*
3096 * Update level 2 cache device stats.
3097 */
3098
3099 for (i = 0; i < nl2cache; i++) {
3100 VERIFY(nvlist_lookup_uint64(l2cache[i],
3101 ZPOOL_CONFIG_GUID, &guid) == 0);
3102
3103 vd = NULL;
3104 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3105 if (guid ==
3106 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3107 vd = spa->spa_l2cache.sav_vdevs[j];
3108 break;
3109 }
3110 }
3111 ASSERT(vd != NULL);
3112
3113 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3114 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3115 == 0);
3116 vdev_get_stats(vd, vs);
3117 }
3118 }
3119}
3120
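/*
 * Report the pool's feature reference counts: walk both the
 * features-for-read and features-for-write ZAP objects and publish
 * each feature's refcount under ZPOOL_CONFIG_FEATURE_STATS.
 */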
3121static void
3122spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3123{
3124 nvlist_t *features;
3125 zap_cursor_t zc;
3126 zap_attribute_t za;
3127
3128 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3129 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3130
3131 if (spa->spa_feat_for_read_obj != 0) {
3132 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3133 spa->spa_feat_for_read_obj);
3134 zap_cursor_retrieve(&zc, &za) == 0;
3135 zap_cursor_advance(&zc)) {
3136 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3137 za.za_num_integers == 1);
3138 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3139 za.za_first_integer));
3140 }
3141 zap_cursor_fini(&zc);
3142 }
3143
3144 if (spa->spa_feat_for_write_obj != 0) {
3145 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3146 spa->spa_feat_for_write_obj);
3147 zap_cursor_retrieve(&zc, &za) == 0;
3148 zap_cursor_advance(&zc)) {
3149 ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3150 za.za_num_integers == 1);
3151 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3152 za.za_first_integer));
3153 }
3154 zap_cursor_fini(&zc);
3155 }
3156
3157 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3158 features) == 0);
3159 nvlist_free(features);
3160}
3161
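/*
 * Open the named pool just long enough to generate its config nvlist
 * (including spare, l2cache and feature stats) and to fetch its
 * alternate root. SCL_CONFIG is held across the generation, though,
 * as noted below, a small window of inconsistency remains.
 */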
34dc7c2f 3162int
3163spa_get_stats(const char *name, nvlist_t **config,
3164 char *altroot, size_t buflen)
3165{
3166 int error;
3167 spa_t *spa;
3168
3169 *config = NULL;
428870ff 3170 error = spa_open_common(name, &spa, FTAG, NULL, config);
34dc7c2f 3171
3172 if (spa != NULL) {
3173 /*
3174 * This still leaves a window of inconsistency where the spares
3175 * or l2cache devices could change and the config would be
3176 * self-inconsistent.
3177 */
3178 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f 3179
9babb374 3180 if (*config != NULL) {
3181 uint64_t loadtimes[2];
3182
3183 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3184 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3185 VERIFY(nvlist_add_uint64_array(*config,
3186 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3187
b128c09f 3188 VERIFY(nvlist_add_uint64(*config,
3189 ZPOOL_CONFIG_ERRCOUNT,
3190 spa_get_errlog_size(spa)) == 0);
3191
3192 if (spa_suspended(spa))
3193 VERIFY(nvlist_add_uint64(*config,
3194 ZPOOL_CONFIG_SUSPENDED,
3195 spa->spa_failmode) == 0);
b128c09f 3196
3197 spa_add_spares(spa, *config);
3198 spa_add_l2cache(spa, *config);
9ae529ec 3199 spa_add_feature_stats(spa, *config);
9babb374 3200 }
3201 }
3202
3203 /*
3204 * We want to get the alternate root even for faulted pools, so we cheat
3205 * and call spa_lookup() directly.
3206 */
3207 if (altroot) {
3208 if (spa == NULL) {
3209 mutex_enter(&spa_namespace_lock);
3210 spa = spa_lookup(name);
3211 if (spa)
3212 spa_altroot(spa, altroot, buflen);
3213 else
3214 altroot[0] = '\0';
3215 spa = NULL;
3216 mutex_exit(&spa_namespace_lock);
3217 } else {
3218 spa_altroot(spa, altroot, buflen);
3219 }
3220 }
3221
3222 if (spa != NULL) {
3223 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f 3224 spa_close(spa, FTAG);
9babb374 3225 }
3226
3227 return (error);
3228}
3229
3230/*
3231 * Validate that the auxiliary device array is well formed. We must have an
3232 * array of nvlists, each which describes a valid leaf vdev. If this is an
3233 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3234 * specified, as long as they are well-formed.
3235 */
3236static int
3237spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3238 spa_aux_vdev_t *sav, const char *config, uint64_t version,
3239 vdev_labeltype_t label)
3240{
3241 nvlist_t **dev;
3242 uint_t i, ndev;
3243 vdev_t *vd;
3244 int error;
3245
3246 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3247
3248 /*
3249 * It's acceptable to have no devs specified.
3250 */
3251 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3252 return (0);
3253
3254 if (ndev == 0)
3255 return (EINVAL);
3256
3257 /*
3258 * Make sure the pool is formatted with a version that supports this
3259 * device type.
3260 */
3261 if (spa_version(spa) < version)
3262 return (ENOTSUP);
3263
3264 /*
3265 * Set the pending device list so we correctly handle device in-use
3266 * checking.
3267 */
3268 sav->sav_pending = dev;
3269 sav->sav_npending = ndev;
3270
3271 for (i = 0; i < ndev; i++) {
3272 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3273 mode)) != 0)
3274 goto out;
3275
3276 if (!vd->vdev_ops->vdev_op_leaf) {
3277 vdev_free(vd);
3278 error = EINVAL;
3279 goto out;
3280 }
3281
3282 /*
3283 * The L2ARC currently only supports disk devices in
3284 * kernel context. For user-level testing, we allow it.
34dc7c2f 3285 */
b128c09f 3286#ifdef _KERNEL
3287 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3288 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3289 error = ENOTBLK;
5ffb9d1d 3290 vdev_free(vd);
3291 goto out;
3292 }
b128c09f 3293#endif
3294 vd->vdev_top = vd;
3295
3296 if ((error = vdev_open(vd)) == 0 &&
3297 (error = vdev_label_init(vd, crtxg, label)) == 0) {
3298 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3299 vd->vdev_guid) == 0);
3300 }
3301
3302 vdev_free(vd);
3303
3304 if (error &&
3305 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3306 goto out;
3307 else
3308 error = 0;
3309 }
3310
3311out:
3312 sav->sav_pending = NULL;
3313 sav->sav_npending = 0;
3314 return (error);
3315}
3316
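/*
 * Validate both auxiliary device classes in one call; an error from
 * either the spares or the l2cache devices fails the whole operation.
 */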
3317static int
3318spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3319{
3320 int error;
3321
3322 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3323
3324 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3325 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3326 VDEV_LABEL_SPARE)) != 0) {
3327 return (error);
3328 }
3329
3330 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3331 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3332 VDEV_LABEL_L2CACHE));
3333}
3334
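/*
 * Install the given devices in sav_config, merging them with any
 * devices already recorded there.
 */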
3335static void
3336spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3337 const char *config)
3338{
3339 int i;
3340
3341 if (sav->sav_config != NULL) {
3342 nvlist_t **olddevs;
3343 uint_t oldndevs;
3344 nvlist_t **newdevs;
3345
3346 /*
3347 * Generate the new dev list by concatenating with the
3348 * current dev list.
3349 */
3350 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3351 &olddevs, &oldndevs) == 0);
3352
3353 newdevs = kmem_alloc(sizeof (void *) *
b8d06fca 3354 (ndevs + oldndevs), KM_PUSHPAGE);
3355 for (i = 0; i < oldndevs; i++)
3356 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
b8d06fca 3357 KM_PUSHPAGE) == 0);
3358 for (i = 0; i < ndevs; i++)
3359 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
b8d06fca 3360 KM_PUSHPAGE) == 0);
3361
3362 VERIFY(nvlist_remove(sav->sav_config, config,
3363 DATA_TYPE_NVLIST_ARRAY) == 0);
3364
3365 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3366 config, newdevs, ndevs + oldndevs) == 0);
3367 for (i = 0; i < oldndevs + ndevs; i++)
3368 nvlist_free(newdevs[i]);
3369 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3370 } else {
3371 /*
3372 * Generate a new dev list.
3373 */
3374 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
b8d06fca 3375 KM_PUSHPAGE) == 0);
3376 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3377 devs, ndevs) == 0);
3378 }
3379}
3380
3381/*
3382 * Stop and drop level 2 ARC devices
3383 */
3384void
3385spa_l2cache_drop(spa_t *spa)
3386{
3387 vdev_t *vd;
3388 int i;
3389 spa_aux_vdev_t *sav = &spa->spa_l2cache;
3390
3391 for (i = 0; i < sav->sav_count; i++) {
3392 uint64_t pool;
3393
3394 vd = sav->sav_vdevs[i];
3395 ASSERT(vd != NULL);
3396
3397 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3398 pool != 0ULL && l2arc_vdev_present(vd))
34dc7c2f 3399 l2arc_remove_vdev(vd);
3400 }
3401}
3402
3403/*
3404 * Pool Creation
3405 */
3406int
3407spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
6f1ffb06 3408 nvlist_t *zplprops)
3409{
3410 spa_t *spa;
3411 char *altroot = NULL;
3412 vdev_t *rvd;
3413 dsl_pool_t *dp;
3414 dmu_tx_t *tx;
9babb374 3415 int error = 0;
3416 uint64_t txg = TXG_INITIAL;
3417 nvlist_t **spares, **l2cache;
3418 uint_t nspares, nl2cache;
428870ff 3419 uint64_t version, obj;
3420 boolean_t has_features;
3421 nvpair_t *elem;
d6320ddb 3422 int c;
3423
3424 /*
3425 * If this pool already exists, return failure.
3426 */
3427 mutex_enter(&spa_namespace_lock);
3428 if (spa_lookup(pool) != NULL) {
3429 mutex_exit(&spa_namespace_lock);
3430 return (EEXIST);
3431 }
3432
3433 /*
3434 * Allocate a new spa_t structure.
3435 */
3436 (void) nvlist_lookup_string(props,
3437 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
428870ff 3438 spa = spa_add(pool, NULL, altroot);
fb5f0bc8 3439 spa_activate(spa, spa_mode_global);
34dc7c2f 3440
34dc7c2f 3441 if (props && (error = spa_prop_validate(spa, props))) {
3442 spa_deactivate(spa);
3443 spa_remove(spa);
b128c09f 3444 mutex_exit(&spa_namespace_lock);
3445 return (error);
3446 }
3447
3448 has_features = B_FALSE;
3449 for (elem = nvlist_next_nvpair(props, NULL);
3450 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3451 if (zpool_prop_feature(nvpair_name(elem)))
3452 has_features = B_TRUE;
3453 }
3454
3455 if (has_features || nvlist_lookup_uint64(props,
3456 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
34dc7c2f 3457 version = SPA_VERSION;
3458 }
3459 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3460
3461 spa->spa_first_txg = txg;
3462 spa->spa_uberblock.ub_txg = txg - 1;
3463 spa->spa_uberblock.ub_version = version;
3464 spa->spa_ubsync = spa->spa_uberblock;
3465
3466 /*
3467 * Create "The Godfather" zio to hold all async IOs
3468 */
3469 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
3470 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
3471
3472 /*
3473 * Create the root vdev.
3474 */
b128c09f 3475 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3476
3477 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3478
3479 ASSERT(error != 0 || rvd != NULL);
3480 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3481
3482 if (error == 0 && !zfs_allocatable_devs(nvroot))
3483 error = EINVAL;
3484
3485 if (error == 0 &&
3486 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3487 (error = spa_validate_aux(spa, nvroot, txg,
3488 VDEV_ALLOC_ADD)) == 0) {
d6320ddb 3489 for (c = 0; c < rvd->vdev_children; c++) {
3490 vdev_metaslab_set_size(rvd->vdev_child[c]);
3491 vdev_expand(rvd->vdev_child[c], txg);
3492 }
3493 }
3494
b128c09f 3495 spa_config_exit(spa, SCL_ALL, FTAG);
3496
3497 if (error != 0) {
3498 spa_unload(spa);
3499 spa_deactivate(spa);
3500 spa_remove(spa);
3501 mutex_exit(&spa_namespace_lock);
3502 return (error);
3503 }
3504
3505 /*
3506 * Get the list of spares, if specified.
3507 */
3508 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3509 &spares, &nspares) == 0) {
3510 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
b8d06fca 3511 KM_PUSHPAGE) == 0);
3512 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3513 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 3514 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3515 spa_load_spares(spa);
b128c09f 3516 spa_config_exit(spa, SCL_ALL, FTAG);
3517 spa->spa_spares.sav_sync = B_TRUE;
3518 }
3519
3520 /*
3521 * Get the list of level 2 cache devices, if specified.
3522 */
3523 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3524 &l2cache, &nl2cache) == 0) {
3525 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
b8d06fca 3526 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
3527 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3528 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 3529 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3530 spa_load_l2cache(spa);
b128c09f 3531 spa_config_exit(spa, SCL_ALL, FTAG);
3532 spa->spa_l2cache.sav_sync = B_TRUE;
3533 }
3534
9ae529ec 3535 spa->spa_is_initializing = B_TRUE;
b128c09f 3536 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
34dc7c2f 3537 spa->spa_meta_objset = dp->dp_meta_objset;
9ae529ec 3538 spa->spa_is_initializing = B_FALSE;
34dc7c2f 3539
3540 /*
3541 * Create DDTs (dedup tables).
3542 */
3543 ddt_create(spa);
3544
3545 spa_update_dspace(spa);
3546
3547 tx = dmu_tx_create_assigned(dp, txg);
3548
3549 /*
3550 * Create the pool config object.
3551 */
3552 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
b128c09f 3553 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3554 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3555
3556 if (zap_add(spa->spa_meta_objset,
3557 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3558 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3559 cmn_err(CE_PANIC, "failed to add pool config");
3560 }
3561
3562 if (spa_version(spa) >= SPA_VERSION_FEATURES)
3563 spa_feature_create_zap_objects(spa, tx);
3564
3565 if (zap_add(spa->spa_meta_objset,
3566 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3567 sizeof (uint64_t), 1, &version, tx) != 0) {
3568 cmn_err(CE_PANIC, "failed to add pool version");
3569 }
3570
3571 /* Newly created pools with the right version are always deflated. */
3572 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3573 spa->spa_deflate = TRUE;
3574 if (zap_add(spa->spa_meta_objset,
3575 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3576 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3577 cmn_err(CE_PANIC, "failed to add deflate");
3578 }
3579 }
3580
3581 /*
428870ff 3582 * Create the deferred-free bpobj. Turn off compression
3583 * because sync-to-convergence takes longer if the blocksize
3584 * keeps changing.
3585 */
3586 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3587 dmu_object_set_compress(spa->spa_meta_objset, obj,
34dc7c2f 3588 ZIO_COMPRESS_OFF, tx);
34dc7c2f 3589 if (zap_add(spa->spa_meta_objset,
3590 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3591 sizeof (uint64_t), 1, &obj, tx) != 0) {
3592 cmn_err(CE_PANIC, "failed to add bpobj");
34dc7c2f 3593 }
3594 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3595 spa->spa_meta_objset, obj));
34dc7c2f
BB
3596
3597 /*
3598 * Create the pool's history object.
3599 */
3600 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3601 spa_history_create_obj(spa, tx);
3602
3603 /*
3604 * Set pool properties.
3605 */
3606 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3607 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3608 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
9babb374 3609 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
428870ff 3610
d164b209
BB
3611 if (props != NULL) {
3612 spa_configfile_set(spa, props, B_FALSE);
428870ff 3613 spa_sync_props(spa, props, tx);
d164b209 3614 }
34dc7c2f
BB
3615
3616 dmu_tx_commit(tx);
3617
3618 spa->spa_sync_on = B_TRUE;
3619 txg_sync_start(spa->spa_dsl_pool);
3620
3621 /*
3622 * We explicitly wait for the first transaction to complete so that our
3623 * bean counters are appropriately updated.
3624 */
3625 txg_wait_synced(spa->spa_dsl_pool, txg);
3626
b128c09f 3627 spa_config_sync(spa, B_FALSE, B_TRUE);
34dc7c2f 3628
6f1ffb06 3629 spa_history_log_version(spa, "create");
34dc7c2f 3630
b128c09f
BB
3631 spa->spa_minref = refcount_count(&spa->spa_refcount);
3632
d164b209
BB
3633 mutex_exit(&spa_namespace_lock);
3634
34dc7c2f
BB
3635 return (0);
3636}
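/*
 * Illustrative sketch (not part of the original source): the nvroot parsed
 * by the creation path above carries hot spares as an nvlist array under
 * ZPOOL_CONFIG_SPARES, assembled with the same libnvpair calls used in this
 * file. example_set_spares() and the caller-supplied leaf are hypothetical.
 */
static void
example_set_spares(nvlist_t *nvroot, nvlist_t *spare_leaf)
{
	/* a one-element spares array; real callers pass one leaf per spare */
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spare_leaf, 1) == 0);
}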
3637
9babb374 3638#ifdef _KERNEL
34dc7c2f 3639/*
9babb374
BB
3640 * Get the root pool information from the root disk, then import the root pool
 3641 * during system boot.
34dc7c2f 3642 */
9babb374
BB
3643extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3644
3645static nvlist_t *
3646spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3647{
3648 nvlist_t *config;
3649 nvlist_t *nvtop, *nvroot;
3650 uint64_t pgid;
3651
3652 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3653 return (NULL);
3654
3655 /*
3656 * Add this top-level vdev to the child array.
3657 */
3658 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3659 &nvtop) == 0);
3660 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3661 &pgid) == 0);
3662 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3663
3664 /*
3665 * Put this pool's top-level vdevs into a root vdev.
3666 */
b8d06fca 3667 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
9babb374
BB
3668 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3669 VDEV_TYPE_ROOT) == 0);
3670 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3671 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3672 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3673 &nvtop, 1) == 0);
3674
3675 /*
3676 * Replace the existing vdev_tree with the new root vdev in
3677 * this pool's configuration (remove the old, add the new).
3678 */
3679 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3680 nvlist_free(nvroot);
3681 return (config);
3682}
3683
3684/*
3685 * Walk the vdev tree and see if we can find a device with "better"
3686 * configuration. A configuration is "better" if the label on that
3687 * device has a more recent txg.
3688 */
3689static void
3690spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3691{
d6320ddb
BB
3692 int c;
3693
3694 for (c = 0; c < vd->vdev_children; c++)
9babb374
BB
3695 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3696
3697 if (vd->vdev_ops->vdev_op_leaf) {
3698 nvlist_t *label;
3699 uint64_t label_txg;
3700
3701 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3702 &label) != 0)
3703 return;
3704
3705 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3706 &label_txg) == 0);
3707
3708 /*
3709 * Do we have a better boot device?
3710 */
3711 if (label_txg > *txg) {
3712 *txg = label_txg;
3713 *avd = vd;
3714 }
3715 nvlist_free(label);
3716 }
3717}
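/*
 * Illustrative sketch (not part of the original source): spa_alt_rootvdev()
 * above uses a recursive walk over vdev_child[]; the same pattern, shown
 * here just counting leaf vdevs, generalizes to any whole-tree query.
 * example_count_leaves() is hypothetical.
 */
static uint64_t
example_count_leaves(vdev_t *vd)
{
	uint64_t n = 0;
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		n += example_count_leaves(vd->vdev_child[c]);

	return (vd->vdev_ops->vdev_op_leaf ? 1 : n);
}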
3718
3719/*
3720 * Import a root pool.
3721 *
 3722 * For x86, devpath_list will consist of the devid and/or physpath name of
3723 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3724 * The GRUB "findroot" command will return the vdev we should boot.
3725 *
 3726 * For SPARC, devpath_list consists of the physpath name of the booting
 3727 * device, whether the root pool is a single-device pool or a mirrored one.
3728 * e.g.
3729 * "/pci@1f,0/ide@d/disk@0,0:a"
3730 */
3731int
3732spa_import_rootpool(char *devpath, char *devid)
3733{
3734 spa_t *spa;
3735 vdev_t *rvd, *bvd, *avd = NULL;
3736 nvlist_t *config, *nvtop;
3737 uint64_t guid, txg;
3738 char *pname;
3739 int error;
3740
3741 /*
3742 * Read the label from the boot device and generate a configuration.
3743 */
428870ff
BB
3744 config = spa_generate_rootconf(devpath, devid, &guid);
3745#if defined(_OBP) && defined(_KERNEL)
3746 if (config == NULL) {
3747 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3748 /* iscsi boot */
3749 get_iscsi_bootpath_phy(devpath);
3750 config = spa_generate_rootconf(devpath, devid, &guid);
3751 }
3752 }
3753#endif
3754 if (config == NULL) {
9ae529ec 3755 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
9babb374
BB
3756 devpath);
3757 return (EIO);
3758 }
3759
3760 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3761 &pname) == 0);
3762 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3763
3764 mutex_enter(&spa_namespace_lock);
3765 if ((spa = spa_lookup(pname)) != NULL) {
3766 /*
3767 * Remove the existing root pool from the namespace so that we
3768 * can replace it with the correct config we just read in.
3769 */
3770 spa_remove(spa);
3771 }
3772
428870ff 3773 spa = spa_add(pname, config, NULL);
9babb374 3774 spa->spa_is_root = B_TRUE;
572e2857 3775 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
9babb374
BB
3776
3777 /*
3778 * Build up a vdev tree based on the boot device's label config.
3779 */
3780 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3781 &nvtop) == 0);
3782 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3783 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3784 VDEV_ALLOC_ROOTPOOL);
3785 spa_config_exit(spa, SCL_ALL, FTAG);
3786 if (error) {
3787 mutex_exit(&spa_namespace_lock);
3788 nvlist_free(config);
3789 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3790 pname);
3791 return (error);
3792 }
3793
3794 /*
3795 * Get the boot vdev.
3796 */
3797 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3798 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3799 (u_longlong_t)guid);
3800 error = ENOENT;
3801 goto out;
3802 }
3803
3804 /*
3805 * Determine if there is a better boot device.
3806 */
3807 avd = bvd;
3808 spa_alt_rootvdev(rvd, &avd, &txg);
3809 if (avd != bvd) {
3810 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3811 "try booting from '%s'", avd->vdev_path);
3812 error = EINVAL;
3813 goto out;
3814 }
3815
3816 /*
3817 * If the boot device is part of a spare vdev then ensure that
3818 * we're booting off the active spare.
3819 */
3820 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3821 !bvd->vdev_isspare) {
3822 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3823 "try booting from '%s'",
572e2857
BB
3824 bvd->vdev_parent->
3825 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
9babb374
BB
3826 error = EINVAL;
3827 goto out;
3828 }
3829
9babb374
BB
3830 error = 0;
3831out:
3832 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3833 vdev_free(rvd);
3834 spa_config_exit(spa, SCL_ALL, FTAG);
3835 mutex_exit(&spa_namespace_lock);
3836
3837 nvlist_free(config);
3838 return (error);
3839}
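/*
 * Illustrative sketch (not part of the original source): platform boot code
 * would hand the boot device's physpath (and, where available, its devid) to
 * spa_import_rootpool(). The path below is the SPARC example from the comment
 * above; whether a NULL devid is acceptable is a platform-dependent assumption.
 */
static int
example_boot_import(void)
{
	char devpath[] = "/pci@1f,0/ide@d/disk@0,0:a";

	return (spa_import_rootpool(devpath, NULL));
}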
3840
3841#endif
3842
9babb374
BB
3843/*
3844 * Import a non-root pool into the system.
3845 */
3846int
572e2857 3847spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
34dc7c2f
BB
3848{
3849 spa_t *spa;
3850 char *altroot = NULL;
428870ff
BB
3851 spa_load_state_t state = SPA_LOAD_IMPORT;
3852 zpool_rewind_policy_t policy;
572e2857
BB
3853 uint64_t mode = spa_mode_global;
3854 uint64_t readonly = B_FALSE;
9babb374 3855 int error;
34dc7c2f
BB
3856 nvlist_t *nvroot;
3857 nvlist_t **spares, **l2cache;
3858 uint_t nspares, nl2cache;
34dc7c2f
BB
3859
3860 /*
3861 * If a pool with this name exists, return failure.
3862 */
3863 mutex_enter(&spa_namespace_lock);
428870ff 3864 if (spa_lookup(pool) != NULL) {
9babb374
BB
3865 mutex_exit(&spa_namespace_lock);
3866 return (EEXIST);
34dc7c2f
BB
3867 }
3868
3869 /*
3870 * Create and initialize the spa structure.
3871 */
3872 (void) nvlist_lookup_string(props,
3873 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
572e2857
BB
3874 (void) nvlist_lookup_uint64(props,
3875 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3876 if (readonly)
3877 mode = FREAD;
428870ff 3878 spa = spa_add(pool, config, altroot);
572e2857
BB
3879 spa->spa_import_flags = flags;
3880
3881 /*
3882 * Verbatim import - Take a pool and insert it into the namespace
3883 * as if it had been loaded at boot.
3884 */
3885 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3886 if (props != NULL)
3887 spa_configfile_set(spa, props, B_FALSE);
3888
3889 spa_config_sync(spa, B_FALSE, B_TRUE);
3890
3891 mutex_exit(&spa_namespace_lock);
6f1ffb06 3892 spa_history_log_version(spa, "import");
572e2857
BB
3893
3894 return (0);
3895 }
3896
3897 spa_activate(spa, mode);
34dc7c2f 3898
9babb374
BB
3899 /*
3900 * Don't start async tasks until we know everything is healthy.
3901 */
3902 spa_async_suspend(spa);
b128c09f 3903
572e2857
BB
3904 zpool_get_rewind_policy(config, &policy);
3905 if (policy.zrp_request & ZPOOL_DO_REWIND)
3906 state = SPA_LOAD_RECOVER;
3907
34dc7c2f 3908 /*
9babb374
BB
3909 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
3910 * because the user-supplied config is actually the one to trust when
b128c09f 3911 * doing an import.
34dc7c2f 3912 */
428870ff
BB
3913 if (state != SPA_LOAD_RECOVER)
3914 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
572e2857 3915
428870ff
BB
3916 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3917 policy.zrp_request);
3918
3919 /*
572e2857
BB
3920 * Propagate anything learned while loading the pool and pass it
3921 * back to caller (i.e. rewind info, missing devices, etc).
428870ff 3922 */
572e2857
BB
3923 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3924 spa->spa_load_info) == 0);
34dc7c2f 3925
b128c09f 3926 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3927 /*
9babb374
BB
3928 * Toss any existing sparelist, as it doesn't have any validity
3929 * anymore, and conflicts with spa_has_spare().
34dc7c2f 3930 */
9babb374 3931 if (spa->spa_spares.sav_config) {
34dc7c2f
BB
3932 nvlist_free(spa->spa_spares.sav_config);
3933 spa->spa_spares.sav_config = NULL;
3934 spa_load_spares(spa);
3935 }
9babb374 3936 if (spa->spa_l2cache.sav_config) {
34dc7c2f
BB
3937 nvlist_free(spa->spa_l2cache.sav_config);
3938 spa->spa_l2cache.sav_config = NULL;
3939 spa_load_l2cache(spa);
3940 }
3941
3942 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3943 &nvroot) == 0);
3944 if (error == 0)
9babb374
BB
3945 error = spa_validate_aux(spa, nvroot, -1ULL,
3946 VDEV_ALLOC_SPARE);
34dc7c2f
BB
3947 if (error == 0)
3948 error = spa_validate_aux(spa, nvroot, -1ULL,
3949 VDEV_ALLOC_L2CACHE);
b128c09f 3950 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f 3951
d164b209
BB
3952 if (props != NULL)
3953 spa_configfile_set(spa, props, B_FALSE);
3954
fb5f0bc8
BB
3955 if (error != 0 || (props && spa_writeable(spa) &&
3956 (error = spa_prop_set(spa, props)))) {
9babb374
BB
3957 spa_unload(spa);
3958 spa_deactivate(spa);
3959 spa_remove(spa);
34dc7c2f
BB
3960 mutex_exit(&spa_namespace_lock);
3961 return (error);
3962 }
3963
572e2857
BB
3964 spa_async_resume(spa);
3965
34dc7c2f
BB
3966 /*
3967 * Override any spares and level 2 cache devices as specified by
3968 * the user, as these may have correct device names/devids, etc.
3969 */
3970 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3971 &spares, &nspares) == 0) {
3972 if (spa->spa_spares.sav_config)
3973 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
3974 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
3975 else
3976 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
b8d06fca 3977 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
3978 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3979 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
b128c09f 3980 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3981 spa_load_spares(spa);
b128c09f 3982 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3983 spa->spa_spares.sav_sync = B_TRUE;
3984 }
3985 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3986 &l2cache, &nl2cache) == 0) {
3987 if (spa->spa_l2cache.sav_config)
3988 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
3989 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
3990 else
3991 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
b8d06fca 3992 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
34dc7c2f
BB
3993 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3994 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
b128c09f 3995 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 3996 spa_load_l2cache(spa);
b128c09f 3997 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
3998 spa->spa_l2cache.sav_sync = B_TRUE;
3999 }
4000
428870ff
BB
4001 /*
4002 * Check for any removed devices.
4003 */
4004 if (spa->spa_autoreplace) {
4005 spa_aux_check_removed(&spa->spa_spares);
4006 spa_aux_check_removed(&spa->spa_l2cache);
4007 }
4008
fb5f0bc8 4009 if (spa_writeable(spa)) {
b128c09f
BB
4010 /*
4011 * Update the config cache to include the newly-imported pool.
4012 */
45d1cae3 4013 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
b128c09f 4014 }
34dc7c2f 4015
34dc7c2f 4016 /*
9babb374
BB
4017 * It's possible that the pool was expanded while it was exported.
4018 * We kick off an async task to handle this for us.
34dc7c2f 4019 */
9babb374 4020 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
b128c09f 4021
9babb374 4022 mutex_exit(&spa_namespace_lock);
6f1ffb06 4023 spa_history_log_version(spa, "import");
b128c09f 4024
526af785
PJD
4025#ifdef _KERNEL
4026 zvol_create_minors(pool);
4027#endif
4028
b128c09f
BB
4029 return (0);
4030}
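/*
 * Illustrative sketch (not part of the original source): a read-only import
 * driven by a caller of spa_import(). The pool name "tank" is hypothetical;
 * the props nvlist uses the same ZPOOL_PROP_READONLY lookup the import path
 * performs above, so the pool is activated with mode FREAD.
 */
static int
example_readonly_import(nvlist_t *config)
{
	nvlist_t *props;
	int error;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_uint64(props,
	    zpool_prop_to_name(ZPOOL_PROP_READONLY), 1ULL) == 0);

	/* flags == 0: a normal import that goes through spa_load_best() */
	error = spa_import("tank", config, props, 0);
	nvlist_free(props);
	return (error);
}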
4031
34dc7c2f
BB
4032nvlist_t *
4033spa_tryimport(nvlist_t *tryconfig)
4034{
4035 nvlist_t *config = NULL;
4036 char *poolname;
4037 spa_t *spa;
4038 uint64_t state;
d164b209 4039 int error;
34dc7c2f
BB
4040
4041 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4042 return (NULL);
4043
4044 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4045 return (NULL);
4046
4047 /*
4048 * Create and initialize the spa structure.
4049 */
4050 mutex_enter(&spa_namespace_lock);
428870ff 4051 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
fb5f0bc8 4052 spa_activate(spa, FREAD);
34dc7c2f
BB
4053
4054 /*
4055 * Pass off the heavy lifting to spa_load().
4056 * Pass TRUE for mosconfig because the user-supplied config
4057 * is actually the one to trust when doing an import.
4058 */
428870ff 4059 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
34dc7c2f
BB
4060
4061 /*
4062 * If 'tryconfig' was at least parsable, return the current config.
4063 */
4064 if (spa->spa_root_vdev != NULL) {
34dc7c2f 4065 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
34dc7c2f
BB
4066 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4067 poolname) == 0);
4068 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4069 state) == 0);
4070 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4071 spa->spa_uberblock.ub_timestamp) == 0);
9ae529ec
CS
4072 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4073 spa->spa_load_info) == 0);
34dc7c2f
BB
4074
4075 /*
4076 * If the bootfs property exists on this pool then we
4077 * copy it out so that external consumers can tell which
4078 * pools are bootable.
4079 */
d164b209 4080 if ((!error || error == EEXIST) && spa->spa_bootfs) {
b8d06fca 4081 char *tmpname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
34dc7c2f
BB
4082
4083 /*
4084 * We have to play games with the name since the
4085 * pool was opened as TRYIMPORT_NAME.
4086 */
b128c09f 4087 if (dsl_dsobj_to_dsname(spa_name(spa),
34dc7c2f
BB
4088 spa->spa_bootfs, tmpname) == 0) {
4089 char *cp;
b8d06fca 4090 char *dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
34dc7c2f
BB
4091
4092 cp = strchr(tmpname, '/');
4093 if (cp == NULL) {
4094 (void) strlcpy(dsname, tmpname,
4095 MAXPATHLEN);
4096 } else {
4097 (void) snprintf(dsname, MAXPATHLEN,
4098 "%s/%s", poolname, ++cp);
4099 }
4100 VERIFY(nvlist_add_string(config,
4101 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4102 kmem_free(dsname, MAXPATHLEN);
4103 }
4104 kmem_free(tmpname, MAXPATHLEN);
4105 }
4106
4107 /*
4108 * Add the list of hot spares and level 2 cache devices.
4109 */
9babb374 4110 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
34dc7c2f
BB
4111 spa_add_spares(spa, config);
4112 spa_add_l2cache(spa, config);
9babb374 4113 spa_config_exit(spa, SCL_CONFIG, FTAG);
34dc7c2f
BB
4114 }
4115
4116 spa_unload(spa);
4117 spa_deactivate(spa);
4118 spa_remove(spa);
4119 mutex_exit(&spa_namespace_lock);
4120
4121 return (config);
4122}
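/*
 * Illustrative sketch (not part of the original source): spa_tryimport()
 * probes a pool without leaving it imported. A caller inspects the refreshed
 * config and frees it; example_probe_state() is hypothetical.
 */
static uint64_t
example_probe_state(nvlist_t *tryconfig)
{
	nvlist_t *config;
	uint64_t state = POOL_STATE_UNINITIALIZED;

	if ((config = spa_tryimport(tryconfig)) != NULL) {
		/* spa_tryimport() always adds the pool state (see above) */
		VERIFY(nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
		nvlist_free(config);
	}
	return (state);
}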
4123
4124/*
4125 * Pool export/destroy
4126 *
4127 * The act of destroying or exporting a pool is very simple. We make sure there
4128 * is no more pending I/O and any references to the pool are gone. Then, we
4129 * update the pool state and sync all the labels to disk, removing the
fb5f0bc8
BB
4130 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4131 * we don't sync the labels or remove the configuration cache.
34dc7c2f
BB
4132 */
4133static int
b128c09f 4134spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
fb5f0bc8 4135 boolean_t force, boolean_t hardforce)
34dc7c2f
BB
4136{
4137 spa_t *spa;
4138
4139 if (oldconfig)
4140 *oldconfig = NULL;
4141
fb5f0bc8 4142 if (!(spa_mode_global & FWRITE))
34dc7c2f
BB
4143 return (EROFS);
4144
4145 mutex_enter(&spa_namespace_lock);
4146 if ((spa = spa_lookup(pool)) == NULL) {
4147 mutex_exit(&spa_namespace_lock);
4148 return (ENOENT);
4149 }
4150
4151 /*
4152 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4153 * reacquire the namespace lock, and see if we can export.
4154 */
4155 spa_open_ref(spa, FTAG);
4156 mutex_exit(&spa_namespace_lock);
4157 spa_async_suspend(spa);
4158 mutex_enter(&spa_namespace_lock);
4159 spa_close(spa, FTAG);
4160
4161 /*
4162 * The pool will be in core if it's openable,
4163 * in which case we can modify its state.
4164 */
4165 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4166 /*
4167 * Objsets may be open only because they're dirty, so we
4168 * have to force it to sync before checking spa_refcnt.
4169 */
34dc7c2f
BB
4170 txg_wait_synced(spa->spa_dsl_pool, 0);
4171
4172 /*
4173 * A pool cannot be exported or destroyed if there are active
4174 * references. If we are resetting a pool, allow references by
4175 * fault injection handlers.
4176 */
4177 if (!spa_refcount_zero(spa) ||
4178 (spa->spa_inject_ref != 0 &&
4179 new_state != POOL_STATE_UNINITIALIZED)) {
34dc7c2f
BB
4180 spa_async_resume(spa);
4181 mutex_exit(&spa_namespace_lock);
4182 return (EBUSY);
4183 }
4184
b128c09f
BB
4185 /*
4186 * A pool cannot be exported if it has an active shared spare.
4187 * This is to prevent other pools stealing the active spare
 4188 * from an exported pool. At the user's own discretion, such a pool can
 4189 * be forcibly exported.
4190 */
4191 if (!force && new_state == POOL_STATE_EXPORTED &&
4192 spa_has_active_shared_spare(spa)) {
4193 spa_async_resume(spa);
4194 mutex_exit(&spa_namespace_lock);
4195 return (EXDEV);
4196 }
34dc7c2f
BB
4197
4198 /*
4199 * We want this to be reflected on every label,
4200 * so mark them all dirty. spa_unload() will do the
4201 * final sync that pushes these changes out.
4202 */
fb5f0bc8 4203 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
b128c09f 4204 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
34dc7c2f 4205 spa->spa_state = new_state;
428870ff
BB
4206 spa->spa_final_txg = spa_last_synced_txg(spa) +
4207 TXG_DEFER_SIZE + 1;
34dc7c2f 4208 vdev_config_dirty(spa->spa_root_vdev);
b128c09f 4209 spa_config_exit(spa, SCL_ALL, FTAG);
34dc7c2f
BB
4210 }
4211 }
4212
26685276 4213 spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_DESTROY);
34dc7c2f
BB
4214
4215 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4216 spa_unload(spa);
4217 spa_deactivate(spa);
4218 }
4219
4220 if (oldconfig && spa->spa_config)
4221 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4222
4223 if (new_state != POOL_STATE_UNINITIALIZED) {
fb5f0bc8
BB
4224 if (!hardforce)
4225 spa_config_sync(spa, B_TRUE, B_TRUE);
34dc7c2f 4226 spa_remove(spa);
34dc7c2f
BB
4227 }
4228 mutex_exit(&spa_namespace_lock);
4229
4230 return (0);
4231}
4232
4233/*
4234 * Destroy a storage pool.
4235 */
4236int
4237spa_destroy(char *pool)
4238{
fb5f0bc8
BB
4239 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4240 B_FALSE, B_FALSE));
34dc7c2f
BB
4241}
4242
4243/*
4244 * Export a storage pool.
4245 */
4246int
fb5f0bc8
BB
4247spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4248 boolean_t hardforce)
34dc7c2f 4249{
fb5f0bc8
BB
4250 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4251 force, hardforce));
34dc7c2f
BB
4252}
4253
4254/*
4255 * Similar to spa_export(), this unloads the spa_t without actually removing it
4256 * from the namespace in any way.
4257 */
4258int
4259spa_reset(char *pool)
4260{
b128c09f 4261 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
fb5f0bc8 4262 B_FALSE, B_FALSE));
34dc7c2f
BB
4263}
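/*
 * Illustrative sketch (not part of the original source): the three wrappers
 * above differ only in the new_state and force arguments they hand to
 * spa_export_common(). A forced "hard" export, which skips the final label
 * sync and config-cache update, would look like this.
 */
static int
example_hard_export(char *pool)
{
	return (spa_export(pool, NULL, B_TRUE, B_TRUE));
}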
4264
34dc7c2f
BB
4265/*
4266 * ==========================================================================
4267 * Device manipulation
4268 * ==========================================================================
4269 */
4270
4271/*
4272 * Add a device to a storage pool.
4273 */
4274int
4275spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4276{
428870ff 4277 uint64_t txg, id;
fb5f0bc8 4278 int error;
34dc7c2f
BB
4279 vdev_t *rvd = spa->spa_root_vdev;
4280 vdev_t *vd, *tvd;
4281 nvlist_t **spares, **l2cache;
4282 uint_t nspares, nl2cache;
d6320ddb 4283 int c;
34dc7c2f 4284
572e2857
BB
4285 ASSERT(spa_writeable(spa));
4286
34dc7c2f
BB
4287 txg = spa_vdev_enter(spa);
4288
4289 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4290 VDEV_ALLOC_ADD)) != 0)
4291 return (spa_vdev_exit(spa, NULL, txg, error));
4292
b128c09f 4293 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
34dc7c2f
BB
4294
4295 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4296 &nspares) != 0)
4297 nspares = 0;
4298
4299 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4300 &nl2cache) != 0)
4301 nl2cache = 0;
4302
b128c09f 4303 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
34dc7c2f 4304 return (spa_vdev_exit(spa, vd, txg, EINVAL));
34dc7c2f 4305
b128c09f
BB
4306 if (vd->vdev_children != 0 &&
4307 (error = vdev_create(vd, txg, B_FALSE)) != 0)
4308 return (spa_vdev_exit(spa, vd, txg, error));
34dc7c2f
BB
4309
4310 /*
4311 * We must validate the spares and l2cache devices after checking the
4312 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
4313 */
b128c09f 4314 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
34dc7c2f 4315 return (spa_vdev_exit(spa, vd, txg, error));
34dc7c2f
BB
4316
4317 /*
4318 * Transfer each new top-level vdev from vd to rvd.
4319 */
d6320ddb 4320 for (c = 0; c < vd->vdev_children; c++) {
428870ff
BB
4321
4322 /*
4323 * Set the vdev id to the first hole, if one exists.
4324 */
4325 for (id = 0; id < rvd->vdev_children; id++) {
4326 if (rvd->vdev_child[id]->vdev_ishole) {
4327 vdev_free(rvd->vdev_child[id]);
4328 break;
4329 }
4330 }
34dc7c2f
BB
4331 tvd = vd->vdev_child[c];
4332 vdev_remove_child(vd, tvd);
428870ff 4333 tvd->vdev_id = id;
34dc7c2f
BB
4334 vdev_add_child(rvd, tvd);
4335 vdev_config_dirty(tvd);
4336 }
4337
4338 if (nspares != 0) {
4339 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4340 ZPOOL_CONFIG_SPARES);
4341 spa_load_spares(spa);
4342 spa->spa_spares.sav_sync = B_TRUE;
4343 }
4344
4345 if (nl2cache != 0) {
4346 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4347 ZPOOL_CONFIG_L2CACHE);
4348 spa_load_l2cache(spa);
4349 spa->spa_l2cache.sav_sync = B_TRUE;
4350 }
4351
4352 /*
4353 * We have to be careful when adding new vdevs to an existing pool.
4354 * If other threads start allocating from these vdevs before we
4355 * sync the config cache, and we lose power, then upon reboot we may
4356 * fail to open the pool because there are DVAs that the config cache
4357 * can't translate. Therefore, we first add the vdevs without
4358 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4359 * and then let spa_config_update() initialize the new metaslabs.
4360 *
4361 * spa_load() checks for added-but-not-initialized vdevs, so that
4362 * if we lose power at any point in this sequence, the remaining
4363 * steps will be completed the next time we load the pool.
4364 */
4365 (void) spa_vdev_exit(spa, vd, txg, 0);
4366
4367 mutex_enter(&spa_namespace_lock);
4368 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4369 mutex_exit(&spa_namespace_lock);
4370
4371 return (0);
4372}
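/*
 * Illustrative sketch (not part of the original source): spa_vdev_add()
 * expects the same nvroot shape spa_config_parse() handles at creation time,
 * a root-type nvlist whose new top-level vdevs sit in a ZPOOL_CONFIG_CHILDREN
 * array. The caller-supplied leaf and example_add_vdev() are hypothetical.
 */
static int
example_add_vdev(spa_t *spa, nvlist_t *leaf)
{
	nvlist_t *nvroot;
	int error;

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &leaf, 1) == 0);

	error = spa_vdev_add(spa, nvroot);
	nvlist_free(nvroot);
	return (error);
}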
4373
4374/*
4375 * Attach a device to a mirror. The arguments are the path to any device
4376 * in the mirror, and the nvroot for the new device. If the path specifies
4377 * a device that is not mirrored, we automatically insert the mirror vdev.
4378 *
4379 * If 'replacing' is specified, the new device is intended to replace the
4380 * existing device; in this case the two devices are made into their own
4381 * mirror using the 'replacing' vdev, which is functionally identical to
4382 * the mirror vdev (it actually reuses all the same ops) but has a few
4383 * extra rules: you can't attach to it after it's been created, and upon
4384 * completion of resilvering, the first disk (the one being replaced)
4385 * is automatically detached.
4386 */
4387int
4388spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4389{
428870ff 4390 uint64_t txg, dtl_max_txg;
1fde1e37 4391 ASSERTV(vdev_t *rvd = spa->spa_root_vdev;)
34dc7c2f
BB
4392 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4393 vdev_ops_t *pvops;
b128c09f
BB
4394 char *oldvdpath, *newvdpath;
4395 int newvd_isspare;
4396 int error;
34dc7c2f 4397
572e2857
BB
4398 ASSERT(spa_writeable(spa));
4399
34dc7c2f
BB
4400 txg = spa_vdev_enter(spa);
4401
b128c09f 4402 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
4403
4404 if (oldvd == NULL)
4405 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4406
4407 if (!oldvd->vdev_ops->vdev_op_leaf)
4408 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4409
4410 pvd = oldvd->vdev_parent;
4411
4412 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
5ffb9d1d 4413 VDEV_ALLOC_ATTACH)) != 0)
34dc7c2f
BB
4414 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4415
4416 if (newrootvd->vdev_children != 1)
4417 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4418
4419 newvd = newrootvd->vdev_child[0];
4420
4421 if (!newvd->vdev_ops->vdev_op_leaf)
4422 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4423
4424 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4425 return (spa_vdev_exit(spa, newrootvd, txg, error));
4426
4427 /*
4428 * Spares can't replace logs
4429 */
b128c09f 4430 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
34dc7c2f
BB
4431 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4432
4433 if (!replacing) {
4434 /*
4435 * For attach, the only allowable parent is a mirror or the root
4436 * vdev.
4437 */
4438 if (pvd->vdev_ops != &vdev_mirror_ops &&
4439 pvd->vdev_ops != &vdev_root_ops)
4440 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4441
4442 pvops = &vdev_mirror_ops;
4443 } else {
4444 /*
4445 * Active hot spares can only be replaced by inactive hot
4446 * spares.
4447 */
4448 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857 4449 oldvd->vdev_isspare &&
34dc7c2f
BB
4450 !spa_has_spare(spa, newvd->vdev_guid))
4451 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4452
4453 /*
4454 * If the source is a hot spare, and the parent isn't already a
4455 * spare, then we want to create a new hot spare. Otherwise, we
4456 * want to create a replacing vdev. The user is not allowed to
4457 * attach to a spared vdev child unless the 'isspare' state is
4458 * the same (spare replaces spare, non-spare replaces
4459 * non-spare).
4460 */
572e2857
BB
4461 if (pvd->vdev_ops == &vdev_replacing_ops &&
4462 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
34dc7c2f 4463 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
572e2857
BB
4464 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4465 newvd->vdev_isspare != oldvd->vdev_isspare) {
34dc7c2f 4466 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
572e2857
BB
4467 }
4468
4469 if (newvd->vdev_isspare)
34dc7c2f
BB
4470 pvops = &vdev_spare_ops;
4471 else
4472 pvops = &vdev_replacing_ops;
4473 }
4474
4475 /*
9babb374 4476 * Make sure the new device is big enough.
34dc7c2f 4477 */
9babb374 4478 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
34dc7c2f
BB
4479 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4480
4481 /*
4482 * The new device cannot have a higher alignment requirement
4483 * than the top-level vdev.
4484 */
4485 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4486 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4487
4488 /*
4489 * If this is an in-place replacement, update oldvd's path and devid
4490 * to make it distinguishable from newvd, and unopenable from now on.
4491 */
4492 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4493 spa_strfree(oldvd->vdev_path);
4494 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
b8d06fca 4495 KM_PUSHPAGE);
34dc7c2f
BB
4496 (void) sprintf(oldvd->vdev_path, "%s/%s",
4497 newvd->vdev_path, "old");
4498 if (oldvd->vdev_devid != NULL) {
4499 spa_strfree(oldvd->vdev_devid);
4500 oldvd->vdev_devid = NULL;
4501 }
4502 }
4503
572e2857
BB
4504 /* mark the device being resilvered */
4505 newvd->vdev_resilvering = B_TRUE;
4506
34dc7c2f
BB
4507 /*
4508 * If the parent is not a mirror, or if we're replacing, insert the new
4509 * mirror/replacing/spare vdev above oldvd.
4510 */
4511 if (pvd->vdev_ops != pvops)
4512 pvd = vdev_add_parent(oldvd, pvops);
4513
4514 ASSERT(pvd->vdev_top->vdev_parent == rvd);
4515 ASSERT(pvd->vdev_ops == pvops);
4516 ASSERT(oldvd->vdev_parent == pvd);
4517
4518 /*
4519 * Extract the new device from its root and add it to pvd.
4520 */
4521 vdev_remove_child(newrootvd, newvd);
4522 newvd->vdev_id = pvd->vdev_children;
428870ff 4523 newvd->vdev_crtxg = oldvd->vdev_crtxg;
34dc7c2f
BB
4524 vdev_add_child(pvd, newvd);
4525
34dc7c2f
BB
4526 tvd = newvd->vdev_top;
4527 ASSERT(pvd->vdev_top == tvd);
4528 ASSERT(tvd->vdev_parent == rvd);
4529
4530 vdev_config_dirty(tvd);
4531
4532 /*
428870ff
BB
4533 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4534 * for any dmu_sync-ed blocks. It will propagate upward when
4535 * spa_vdev_exit() calls vdev_dtl_reassess().
34dc7c2f 4536 */
428870ff 4537 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
34dc7c2f 4538
428870ff
BB
4539 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4540 dtl_max_txg - TXG_INITIAL);
34dc7c2f 4541
9babb374 4542 if (newvd->vdev_isspare) {
34dc7c2f 4543 spa_spare_activate(newvd);
26685276 4544 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_DEVICE_SPARE);
9babb374
BB
4545 }
4546
b128c09f
BB
4547 oldvdpath = spa_strdup(oldvd->vdev_path);
4548 newvdpath = spa_strdup(newvd->vdev_path);
4549 newvd_isspare = newvd->vdev_isspare;
34dc7c2f
BB
4550
4551 /*
4552 * Mark newvd's DTL dirty in this txg.
4553 */
4554 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4555
428870ff
BB
4556 /*
4557 * Restart the resilver
4558 */
4559 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4560
4561 /*
4562 * Commit the config
4563 */
4564 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
34dc7c2f 4565
6f1ffb06 4566 spa_history_log_internal(spa, "vdev attach", NULL,
428870ff 4567 "%s vdev=%s %s vdev=%s",
45d1cae3
BB
4568 replacing && newvd_isspare ? "spare in" :
4569 replacing ? "replace" : "attach", newvdpath,
4570 replacing ? "for" : "to", oldvdpath);
b128c09f
BB
4571
4572 spa_strfree(oldvdpath);
4573 spa_strfree(newvdpath);
4574
572e2857 4575 if (spa->spa_bootfs)
26685276 4576 spa_event_notify(spa, newvd, FM_EREPORT_ZFS_BOOTFS_VDEV_ATTACH);
572e2857 4577
34dc7c2f
BB
4578 return (0);
4579}
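/*
 * Illustrative sketch (not part of the original source): replacing a disk is
 * an attach with 'replacing' set; nvroot must describe exactly one new leaf
 * (see the newrootvd checks above), and the old disk detaches itself once
 * resilvering completes. example_replace_disk() is hypothetical.
 */
static int
example_replace_disk(spa_t *spa, uint64_t old_guid, nvlist_t *nvroot)
{
	return (spa_vdev_attach(spa, old_guid, nvroot, B_TRUE));
}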
4580
4581/*
4582 * Detach a device from a mirror or replacing vdev.
4583 * If 'replace_done' is specified, only detach if the parent
4584 * is a replacing vdev.
4585 */
4586int
fb5f0bc8 4587spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
34dc7c2f
BB
4588{
4589 uint64_t txg;
fb5f0bc8 4590 int error;
1fde1e37 4591 ASSERTV(vdev_t *rvd = spa->spa_root_vdev;)
34dc7c2f
BB
4592 vdev_t *vd, *pvd, *cvd, *tvd;
4593 boolean_t unspare = B_FALSE;
d4ed6673 4594 uint64_t unspare_guid = 0;
428870ff 4595 char *vdpath;
d6320ddb 4596 int c, t;
34dc7c2f 4597
572e2857
BB
4598 ASSERT(spa_writeable(spa));
4599
34dc7c2f
BB
4600 txg = spa_vdev_enter(spa);
4601
b128c09f 4602 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
4603
4604 if (vd == NULL)
4605 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4606
4607 if (!vd->vdev_ops->vdev_op_leaf)
4608 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4609
4610 pvd = vd->vdev_parent;
4611
fb5f0bc8
BB
4612 /*
4613 * If the parent/child relationship is not as expected, don't do it.
4614 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4615 * vdev that's replacing B with C. The user's intent in replacing
4616 * is to go from M(A,B) to M(A,C). If the user decides to cancel
4617 * the replace by detaching C, the expected behavior is to end up
4618 * M(A,B). But suppose that right after deciding to detach C,
4619 * the replacement of B completes. We would have M(A,C), and then
4620 * ask to detach C, which would leave us with just A -- not what
4621 * the user wanted. To prevent this, we make sure that the
4622 * parent/child relationship hasn't changed -- in this example,
4623 * that C's parent is still the replacing vdev R.
4624 */
4625 if (pvd->vdev_guid != pguid && pguid != 0)
4626 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4627
34dc7c2f 4628 /*
572e2857 4629 * When replace_done is set, only children of 'replacing' or 'spare' vdevs can be detached.
34dc7c2f 4630 */
572e2857
BB
4631 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4632 pvd->vdev_ops != &vdev_spare_ops)
4633 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
34dc7c2f
BB
4634
4635 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4636 spa_version(spa) >= SPA_VERSION_SPARES);
4637
4638 /*
4639 * Only mirror, replacing, and spare vdevs support detach.
4640 */
4641 if (pvd->vdev_ops != &vdev_replacing_ops &&
4642 pvd->vdev_ops != &vdev_mirror_ops &&
4643 pvd->vdev_ops != &vdev_spare_ops)
4644 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4645
4646 /*
fb5f0bc8
BB
4647 * If this device has the only valid copy of some data,
4648 * we cannot safely detach it.
34dc7c2f 4649 */
fb5f0bc8 4650 if (vdev_dtl_required(vd))
34dc7c2f
BB
4651 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4652
fb5f0bc8 4653 ASSERT(pvd->vdev_children >= 2);
34dc7c2f 4654
b128c09f
BB
4655 /*
4656 * If we are detaching the second disk from a replacing vdev, then
4657 * check to see if we changed the original vdev's path to have "/old"
4658 * at the end in spa_vdev_attach(). If so, undo that change now.
4659 */
572e2857
BB
4660 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4661 vd->vdev_path != NULL) {
4662 size_t len = strlen(vd->vdev_path);
4663
d6320ddb 4664 for (c = 0; c < pvd->vdev_children; c++) {
572e2857
BB
4665 cvd = pvd->vdev_child[c];
4666
4667 if (cvd == vd || cvd->vdev_path == NULL)
4668 continue;
4669
4670 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4671 strcmp(cvd->vdev_path + len, "/old") == 0) {
4672 spa_strfree(cvd->vdev_path);
4673 cvd->vdev_path = spa_strdup(vd->vdev_path);
4674 break;
4675 }
b128c09f
BB
4676 }
4677 }
4678
34dc7c2f
BB
4679 /*
4680 * If we are detaching the original disk from a spare, then it implies
4681 * that the spare should become a real disk, and be removed from the
4682 * active spare list for the pool.
4683 */
4684 if (pvd->vdev_ops == &vdev_spare_ops &&
572e2857
BB
4685 vd->vdev_id == 0 &&
4686 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
34dc7c2f
BB
4687 unspare = B_TRUE;
4688
4689 /*
4690 * Erase the disk labels so the disk can be used for other things.
4691 * This must be done after all other error cases are handled,
4692 * but before we disembowel vd (so we can still do I/O to it).
4693 * But if we can't do it, don't treat the error as fatal --
4694 * it may be that the unwritability of the disk is the reason
4695 * it's being detached!
4696 */
4697 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4698
4699 /*
4700 * Remove vd from its parent and compact the parent's children.
4701 */
4702 vdev_remove_child(pvd, vd);
4703 vdev_compact_children(pvd);
4704
4705 /*
4706 * Remember one of the remaining children so we can get tvd below.
4707 */
572e2857 4708 cvd = pvd->vdev_child[pvd->vdev_children - 1];
34dc7c2f
BB
4709
4710 /*
4711 * If we need to remove the remaining child from the list of hot spares,
fb5f0bc8
BB
4712 * do it now, marking the vdev as no longer a spare in the process.
4713 * We must do this before vdev_remove_parent(), because that can
4714 * change the GUID if it creates a new toplevel GUID. For a similar
4715 * reason, we must remove the spare now, in the same txg as the detach;
4716 * otherwise someone could attach a new sibling, change the GUID, and
4717 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
34dc7c2f
BB
4718 */
4719 if (unspare) {
4720 ASSERT(cvd->vdev_isspare);
4721 spa_spare_remove(cvd);
4722 unspare_guid = cvd->vdev_guid;
fb5f0bc8 4723 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
572e2857 4724 cvd->vdev_unspare = B_TRUE;
34dc7c2f
BB
4725 }
4726
428870ff
BB
4727 /*
4728 * If the parent mirror/replacing vdev only has one child,
4729 * the parent is no longer needed. Remove it from the tree.
4730 */
572e2857
BB
4731 if (pvd->vdev_children == 1) {
4732 if (pvd->vdev_ops == &vdev_spare_ops)
4733 cvd->vdev_unspare = B_FALSE;
428870ff 4734 vdev_remove_parent(cvd);
572e2857
BB
4735 cvd->vdev_resilvering = B_FALSE;
4736 }
4737
428870ff
BB
4738
4739 /*
4740 * We don't set tvd until now because the parent we just removed
4741 * may have been the previous top-level vdev.
4742 */
4743 tvd = cvd->vdev_top;
4744 ASSERT(tvd->vdev_parent == rvd);
4745
4746 /*
4747 * Reevaluate the parent vdev state.
4748 */
4749 vdev_propagate_state(cvd);
4750
4751 /*
4752 * If the 'autoexpand' property is set on the pool then automatically
4753 * try to expand the size of the pool. For example if the device we
4754 * just detached was smaller than the others, it may be possible to
4755 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4756 * first so that we can obtain the updated sizes of the leaf vdevs.
4757 */
4758 if (spa->spa_autoexpand) {
4759 vdev_reopen(tvd);
4760 vdev_expand(tvd, txg);
4761 }
4762
4763 vdev_config_dirty(tvd);
4764
4765 /*
4766 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
4767 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4768 * But first make sure we're not on any *other* txg's DTL list, to
4769 * prevent vd from being accessed after it's freed.
4770 */
4771 vdpath = spa_strdup(vd->vdev_path);
d6320ddb 4772 for (t = 0; t < TXG_SIZE; t++)
428870ff
BB
4773 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4774 vd->vdev_detached = B_TRUE;
4775 vdev_dirty(tvd, VDD_DTL, vd, txg);
4776
26685276 4777 spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_REMOVE);
428870ff 4778
572e2857
BB
4779 /* hang on to the spa before we release the lock */
4780 spa_open_ref(spa, FTAG);
4781
428870ff
BB
4782 error = spa_vdev_exit(spa, vd, txg, 0);
4783
6f1ffb06 4784 spa_history_log_internal(spa, "detach", NULL,
428870ff
BB
4785 "vdev=%s", vdpath);
4786 spa_strfree(vdpath);
4787
4788 /*
4789 * If this was the removal of the original device in a hot spare vdev,
4790 * then we want to go through and remove the device from the hot spare
4791 * list of every other pool.
4792 */
4793 if (unspare) {
572e2857
BB
4794 spa_t *altspa = NULL;
4795
428870ff 4796 mutex_enter(&spa_namespace_lock);
572e2857
BB
4797 while ((altspa = spa_next(altspa)) != NULL) {
4798 if (altspa->spa_state != POOL_STATE_ACTIVE ||
4799 altspa == spa)
428870ff 4800 continue;
572e2857
BB
4801
4802 spa_open_ref(altspa, FTAG);
428870ff 4803 mutex_exit(&spa_namespace_lock);
572e2857 4804 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
428870ff 4805 mutex_enter(&spa_namespace_lock);
572e2857 4806 spa_close(altspa, FTAG);
428870ff
BB
4807 }
4808 mutex_exit(&spa_namespace_lock);
572e2857
BB
4809
4810 /* search the rest of the vdevs for spares to remove */
4811 spa_vdev_resilver_done(spa);
428870ff
BB
4812 }
4813
572e2857
BB
4814 /* all done with the spa; OK to release */
4815 mutex_enter(&spa_namespace_lock);
4816 spa_close(spa, FTAG);
4817 mutex_exit(&spa_namespace_lock);
4818
428870ff
BB
4819 return (error);
4820}
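/*
 * Illustrative sketch (not part of the original source): cancelling an
 * in-progress replacement. Passing the parent's guid makes the detach fail
 * with EBUSY, rather than detaching the wrong disk, if the replacement
 * completes first -- the M(A,R(B,C)) race described above.
 */
static int
example_cancel_replace(spa_t *spa, vdev_t *vd)
{
	return (spa_vdev_detach(spa, vd->vdev_guid,
	    vd->vdev_parent->vdev_guid, B_TRUE));
}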
4821
4822/*
4823 * Split a set of devices from their mirrors, and create a new pool from them.
4824 */
4825int
4826spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4827 nvlist_t *props, boolean_t exp)
4828{
4829 int error = 0;
4830 uint64_t txg, *glist;
4831 spa_t *newspa;
4832 uint_t c, children, lastlog;
4833 nvlist_t **child, *nvl, *tmp;
4834 dmu_tx_t *tx;
4835 char *altroot = NULL;
4836 vdev_t *rvd, **vml = NULL; /* vdev modify list */
4837 boolean_t activate_slog;
4838
572e2857 4839 ASSERT(spa_writeable(spa));
428870ff
BB
4840
4841 txg = spa_vdev_enter(spa);
4842
4843 /* clear the log and flush everything up to now */
4844 activate_slog = spa_passivate_log(spa);
4845 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4846 error = spa_offline_log(spa);
4847 txg = spa_vdev_config_enter(spa);
4848
4849 if (activate_slog)
4850 spa_activate_log(spa);
4851
4852 if (error != 0)
4853 return (spa_vdev_exit(spa, NULL, txg, error));
4854
4855 /* check new spa name before going any further */
4856 if (spa_lookup(newname) != NULL)
4857 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4858
4859 /*
4860 * scan through all the children to ensure they're all mirrors
4861 */
4862 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4863 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4864 &children) != 0)
4865 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4866
4867 /* first, check to ensure we've got the right child count */
4868 rvd = spa->spa_root_vdev;
4869 lastlog = 0;
4870 for (c = 0; c < rvd->vdev_children; c++) {
4871 vdev_t *vd = rvd->vdev_child[c];
4872
4873 /* don't count the holes & logs as children */
4874 if (vd->vdev_islog || vd->vdev_ishole) {
4875 if (lastlog == 0)
4876 lastlog = c;
4877 continue;
4878 }
4879
4880 lastlog = 0;
4881 }
4882 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4883 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4884
4885 /* next, ensure no spare or cache devices are part of the split */
4886 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4887 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4888 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4889
b8d06fca
RY
4890 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_PUSHPAGE);
4891 glist = kmem_zalloc(children * sizeof (uint64_t), KM_PUSHPAGE);
428870ff
BB
4892
4893 /* then, loop over each vdev and validate it */
4894 for (c = 0; c < children; c++) {
4895 uint64_t is_hole = 0;
4896
4897 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4898 &is_hole);
4899
4900 if (is_hole != 0) {
4901 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4902 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4903 continue;
4904 } else {
4905 error = EINVAL;
4906 break;
4907 }
4908 }
4909
4910 /* which disk is going to be split? */
4911 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4912 &glist[c]) != 0) {
4913 error = EINVAL;
4914 break;
4915 }
4916
4917 /* look it up in the spa */
4918 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4919 if (vml[c] == NULL) {
4920 error = ENODEV;
4921 break;
4922 }
4923
4924 /* make sure there's nothing stopping the split */
4925 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4926 vml[c]->vdev_islog ||
4927 vml[c]->vdev_ishole ||
4928 vml[c]->vdev_isspare ||
4929 vml[c]->vdev_isl2cache ||
4930 !vdev_writeable(vml[c]) ||
4931 vml[c]->vdev_children != 0 ||
4932 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4933 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
4934 error = EINVAL;
4935 break;
4936 }
4937
4938 if (vdev_dtl_required(vml[c])) {
4939 error = EBUSY;
4940 break;
4941 }
4942
4943 /* we need certain info from the top level */
4944 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4945 vml[c]->vdev_top->vdev_ms_array) == 0);
4946 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4947 vml[c]->vdev_top->vdev_ms_shift) == 0);
4948 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4949 vml[c]->vdev_top->vdev_asize) == 0);
4950 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4951 vml[c]->vdev_top->vdev_ashift) == 0);
4952 }
4953
4954 if (error != 0) {
4955 kmem_free(vml, children * sizeof (vdev_t *));
4956 kmem_free(glist, children * sizeof (uint64_t));
4957 return (spa_vdev_exit(spa, NULL, txg, error));
4958 }
4959
4960 /* stop writers from using the disks */
4961 for (c = 0; c < children; c++) {
4962 if (vml[c] != NULL)
4963 vml[c]->vdev_offline = B_TRUE;
4964 }
4965 vdev_reopen(spa->spa_root_vdev);
34dc7c2f
BB
4966
4967 /*
428870ff
BB
4968 * Temporarily record the splitting vdevs in the spa config. This
4969 * will disappear once the config is regenerated.
34dc7c2f 4970 */
b8d06fca 4971 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
428870ff
BB
4972 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
4973 glist, children) == 0);
4974 kmem_free(glist, children * sizeof (uint64_t));
34dc7c2f 4975
428870ff
BB
4976 mutex_enter(&spa->spa_props_lock);
4977 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
4978 nvl) == 0);
4979 mutex_exit(&spa->spa_props_lock);
4980 spa->spa_config_splitting = nvl;
4981 vdev_config_dirty(spa->spa_root_vdev);
4982
4983 /* configure and create the new pool */
4984 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
4985 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4986 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
4987 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
4988 spa_version(spa)) == 0);
4989 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
4990 spa->spa_config_txg) == 0);
4991 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4992 spa_generate_guid(NULL)) == 0);
4993 (void) nvlist_lookup_string(props,
4994 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
34dc7c2f 4995
428870ff
BB
4996 /* add the new pool to the namespace */
4997 newspa = spa_add(newname, config, altroot);
4998 newspa->spa_config_txg = spa->spa_config_txg;
4999 spa_set_log_state(newspa, SPA_LOG_CLEAR);
5000
5001 /* release the spa config lock, retaining the namespace lock */
5002 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5003
5004 if (zio_injection_enabled)
5005 zio_handle_panic_injection(spa, FTAG, 1);
5006
5007 spa_activate(newspa, spa_mode_global);
5008 spa_async_suspend(newspa);
5009
5010 /* create the new pool from the disks of the original pool */
5011 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5012 if (error)
5013 goto out;
5014
5015 /* if that worked, generate a real config for the new pool */
5016 if (newspa->spa_root_vdev != NULL) {
5017 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
b8d06fca 5018 NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
428870ff
BB
5019 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5020 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5021 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5022 B_TRUE));
9babb374 5023 }
34dc7c2f 5024
428870ff
BB
5025 /* set the props */
5026 if (props != NULL) {
5027 spa_configfile_set(newspa, props, B_FALSE);
5028 error = spa_prop_set(newspa, props);
5029 if (error)
5030 goto out;
5031 }
34dc7c2f 5032
428870ff
BB
5033 /* flush everything */
5034 txg = spa_vdev_config_enter(newspa);
5035 vdev_config_dirty(newspa->spa_root_vdev);
5036 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
34dc7c2f 5037
428870ff
BB
5038 if (zio_injection_enabled)
5039 zio_handle_panic_injection(spa, FTAG, 2);
34dc7c2f 5040
428870ff 5041 spa_async_resume(newspa);
34dc7c2f 5042
428870ff
BB
5043 /* finally, update the original pool's config */
5044 txg = spa_vdev_config_enter(spa);
5045 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5046 error = dmu_tx_assign(tx, TXG_WAIT);
5047 if (error != 0)
5048 dmu_tx_abort(tx);
5049 for (c = 0; c < children; c++) {
5050 if (vml[c] != NULL) {
5051 vdev_split(vml[c]);
5052 if (error == 0)
6f1ffb06
MA
5053 spa_history_log_internal(spa, "detach", tx,
5054 "vdev=%s", vml[c]->vdev_path);
428870ff 5055 vdev_free(vml[c]);
34dc7c2f 5056 }
34dc7c2f 5057 }
428870ff
BB
5058 vdev_config_dirty(spa->spa_root_vdev);
5059 spa->spa_config_splitting = NULL;
5060 nvlist_free(nvl);
5061 if (error == 0)
5062 dmu_tx_commit(tx);
5063 (void) spa_vdev_exit(spa, NULL, txg, 0);
5064
5065 if (zio_injection_enabled)
5066 zio_handle_panic_injection(spa, FTAG, 3);
5067
5068 /* split is complete; log a history record */
6f1ffb06
MA
5069 spa_history_log_internal(newspa, "split", NULL,
5070 "from pool %s", spa_name(spa));
428870ff
BB
5071
5072 kmem_free(vml, children * sizeof (vdev_t *));
5073
5074 /* if we're not going to mount the filesystems in userland, export */
5075 if (exp)
5076 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5077 B_FALSE, B_FALSE);
5078
5079 return (error);
5080
5081out:
5082 spa_unload(newspa);
5083 spa_deactivate(newspa);
5084 spa_remove(newspa);
5085
5086 txg = spa_vdev_config_enter(spa);
5087
5088 /* re-online all offlined disks */
5089 for (c = 0; c < children; c++) {
5090 if (vml[c] != NULL)
5091 vml[c]->vdev_offline = B_FALSE;
5092 }
5093 vdev_reopen(spa->spa_root_vdev);
5094
5095 nvlist_free(spa->spa_config_splitting);
5096 spa->spa_config_splitting = NULL;
5097 (void) spa_vdev_exit(spa, NULL, txg, error);
34dc7c2f 5098
428870ff 5099 kmem_free(vml, children * sizeof (vdev_t *));
34dc7c2f
BB
5100 return (error);
5101}
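/*
 * Illustrative sketch (not part of the original source): splitting one half
 * of each mirror into a new pool and leaving the result exported. The config
 * nvlist must name exactly one leaf per top-level mirror, as validated above;
 * example_split_mirrors() is hypothetical.
 */
static int
example_split_mirrors(spa_t *spa, char *newname, nvlist_t *config)
{
	return (spa_vdev_split_mirror(spa, newname, config, NULL, B_TRUE));
}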
5102
b128c09f
BB
5103static nvlist_t *
5104spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
34dc7c2f 5105{
d6320ddb
BB
5106 int i;
5107
5108 for (i = 0; i < count; i++) {
b128c09f 5109 uint64_t guid;
34dc7c2f 5110
b128c09f
BB
5111 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5112 &guid) == 0);
34dc7c2f 5113
b128c09f
BB
5114 if (guid == target_guid)
5115 return (nvpp[i]);
34dc7c2f
BB
5116 }
5117
b128c09f 5118 return (NULL);
34dc7c2f
BB
5119}
5120
b128c09f
BB
5121static void
5122spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5123 nvlist_t *dev_to_remove)
34dc7c2f 5124{
b128c09f 5125 nvlist_t **newdev = NULL;
d6320ddb 5126 int i, j;
34dc7c2f 5127
b128c09f 5128 if (count > 1)
b8d06fca 5129 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_PUSHPAGE);
34dc7c2f 5130
d6320ddb 5131 for (i = 0, j = 0; i < count; i++) {
b128c09f
BB
5132 if (dev[i] == dev_to_remove)
5133 continue;
b8d06fca 5134 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_PUSHPAGE) == 0);
34dc7c2f
BB
5135 }
5136
b128c09f
BB
5137 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5138 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
34dc7c2f 5139
d6320ddb 5140 for (i = 0; i < count - 1; i++)
b128c09f 5141 nvlist_free(newdev[i]);
34dc7c2f 5142
b128c09f
BB
5143 if (count > 1)
5144 kmem_free(newdev, (count - 1) * sizeof (void *));
34dc7c2f
BB
5145}
5146
428870ff
BB
5147/*
5148 * Evacuate the device.
5149 */
5150static int
5151spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5152{
5153 uint64_t txg;
5154 int error = 0;
5155
5156 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5157 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5158 ASSERT(vd == vd->vdev_top);
5159
5160 /*
5161 * Evacuate the device. We don't hold the config lock as writer
 5162 * since we need to do I/O, but we do keep the
 5163 * spa_namespace_lock held. Once this completes, the device
5164 * should no longer have any blocks allocated on it.
5165 */
5166 if (vd->vdev_islog) {
5167 if (vd->vdev_stat.vs_alloc != 0)
5168 error = spa_offline_log(spa);
5169 } else {
5170 error = ENOTSUP;
5171 }
5172
5173 if (error)
5174 return (error);
5175
5176 /*
5177 * The evacuation succeeded. Remove any remaining MOS metadata
5178 * associated with this vdev, and wait for these changes to sync.
5179 */
c99c9001 5180 ASSERT0(vd->vdev_stat.vs_alloc);
428870ff
BB
5181 txg = spa_vdev_config_enter(spa);
5182 vd->vdev_removing = B_TRUE;
5183 vdev_dirty(vd, 0, NULL, txg);
5184 vdev_config_dirty(vd);
5185 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5186
5187 return (0);
5188}
5189
5190/*
5191 * Complete the removal by cleaning up the namespace.
5192 */
5193static void
5194spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5195{
5196 vdev_t *rvd = spa->spa_root_vdev;
5197 uint64_t id = vd->vdev_id;
5198 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5199
5200 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5201 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5202 ASSERT(vd == vd->vdev_top);
5203
5204 /*
5205 * Only remove any devices which are empty.
5206 */
5207 if (vd->vdev_stat.vs_alloc != 0)
5208 return;
5209
5210 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5211
5212 if (list_link_active(&vd->vdev_state_dirty_node))
5213 vdev_state_clean(vd);
5214 if (list_link_active(&vd->vdev_config_dirty_node))
5215 vdev_config_clean(vd);
5216
5217 vdev_free(vd);
5218
5219 if (last_vdev) {
5220 vdev_compact_children(rvd);
5221 } else {
5222 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5223 vdev_add_child(rvd, vd);
5224 }
5225 vdev_config_dirty(rvd);
5226
5227 /*
5228 * Reassess the health of our root vdev.
5229 */
5230 vdev_reopen(rvd);
5231}
5232
5233/*
5234 * Remove a device from the pool -
5235 *
5236 * Removing a device from the vdev namespace requires several steps
5237 * and can take a significant amount of time. As a result we use
5238 * the spa_vdev_config_[enter/exit] functions which allow us to
5239 * grab and release the spa_config_lock while still holding the namespace
5240 * lock. During each step the configuration is synced out.
5241 */
5242
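/*
 * Illustrative sketch (not part of the original source): the
 * spa_vdev_config_enter/exit pairing the removal path relies on. Each exit
 * syncs the configuration out before the next step re-enters.
 */
static void
example_config_step(spa_t *spa)
{
	uint64_t txg = spa_vdev_config_enter(spa);

	/* ... mutate vdev state while holding SCL_ALL as writer ... */

	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
}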
34dc7c2f
BB
5243/*
5244 * Remove a device from the pool. Currently, this supports removing only hot
428870ff 5245 * spares, slogs, and level 2 ARC devices.
34dc7c2f
BB
5246 */
5247int
5248spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5249{
5250 vdev_t *vd;
428870ff 5251 metaslab_group_t *mg;
b128c09f 5252 nvlist_t **spares, **l2cache, *nv;
fb5f0bc8 5253 uint64_t txg = 0;
428870ff 5254 uint_t nspares, nl2cache;
34dc7c2f 5255 int error = 0;
fb5f0bc8 5256 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
34dc7c2f 5257
572e2857
BB
5258 ASSERT(spa_writeable(spa));
5259
fb5f0bc8
BB
5260 if (!locked)
5261 txg = spa_vdev_enter(spa);
34dc7c2f 5262
b128c09f 5263 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
34dc7c2f
BB
5264
5265 if (spa->spa_spares.sav_vdevs != NULL &&
34dc7c2f 5266 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
b128c09f
BB
5267 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5268 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5269 /*
5270 * Only remove the hot spare if it's not currently in use
5271 * in this pool.
5272 */
5273 if (vd == NULL || unspare) {
5274 spa_vdev_remove_aux(spa->spa_spares.sav_config,
5275 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5276 spa_load_spares(spa);
5277 spa->spa_spares.sav_sync = B_TRUE;
5278 } else {
5279 error = EBUSY;
5280 }
5281 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
34dc7c2f 5282 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
b128c09f
BB
5283 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5284 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5285 /*
5286 * Cache devices can always be removed.
5287 */
5288 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5289 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
34dc7c2f
BB
5290 spa_load_l2cache(spa);
5291 spa->spa_l2cache.sav_sync = B_TRUE;
428870ff
BB
5292 } else if (vd != NULL && vd->vdev_islog) {
5293 ASSERT(!locked);
5294 ASSERT(vd == vd->vdev_top);
5295
5296 /*
5297 * XXX - Once we have bp-rewrite this should
5298 * become the common case.
5299 */
5300
5301 mg = vd->vdev_mg;
5302
5303 /*
5304 * Stop allocating from this vdev.
5305 */
5306 metaslab_group_passivate(mg);
5307
5308 /*
5309 * Wait for the youngest allocations and frees to sync,
5310 * and then wait for the deferral of those frees to finish.
5311 */
5312 spa_vdev_config_exit(spa, NULL,
5313 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5314
5315 /*
5316 * Attempt to evacuate the vdev.
5317 */
5318 error = spa_vdev_remove_evacuate(spa, vd);
5319
5320 txg = spa_vdev_config_enter(spa);
5321
5322 /*
5323 * If we couldn't evacuate the vdev, unwind.
5324 */
5325 if (error) {
5326 metaslab_group_activate(mg);
5327 return (spa_vdev_exit(spa, NULL, txg, error));
5328 }
5329
5330 /*
5331 * Clean up the vdev namespace.
5332 */
5333 spa_vdev_remove_from_namespace(spa, vd);
5334
b128c09f
BB
5335 } else if (vd != NULL) {
5336 /*
5337 * Normal vdevs cannot be removed (yet).
5338 */
5339 error = ENOTSUP;
5340 } else {
5341 /*
5342 * There is no vdev of any kind with the specified guid.
5343 */
5344 error = ENOENT;
34dc7c2f
BB
5345 }
5346
fb5f0bc8
BB
5347 if (!locked)
5348 return (spa_vdev_exit(spa, NULL, txg, error));
5349
5350 return (error);
34dc7c2f
BB
5351}
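
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller that does not already hold spa_namespace_lock can remove an
 * aux device by guid; spa_vdev_remove() then takes and drops the locks
 * itself.  The guid is assumed to come from spa_lookup_by_guid() or
 * from userland:
 *
 *	int err = spa_vdev_remove(spa, guid, B_FALSE);
 *	if (err == EBUSY)
 *		(the device is a hot spare still in use)
 *	else if (err == ENOTSUP)
 *		(normal top-level vdevs cannot be removed yet)
 */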

/*
 * Find any device that's done replacing, or a vdev marked 'unspare' that's
 * currently spared, so we can detach it.
 */
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
	vdev_t *newvd, *oldvd;
	int c;

	for (c = 0; c < vd->vdev_children; c++) {
		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
		if (oldvd != NULL)
			return (oldvd);
	}

	/*
	 * Check for a completed replacement.  We always consider the first
	 * vdev in the list to be the oldest vdev, and the last one to be
	 * the newest (see spa_vdev_attach() for how that works).  In
	 * the case where the newest vdev is faulted, we will not automatically
	 * remove it after a resilver completes.  This is OK as it will require
	 * user intervention to determine which disk the admin wishes to keep.
	 */
	if (vd->vdev_ops == &vdev_replacing_ops) {
		ASSERT(vd->vdev_children > 1);

		newvd = vd->vdev_child[vd->vdev_children - 1];
		oldvd = vd->vdev_child[0];

		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
		    !vdev_dtl_required(oldvd))
			return (oldvd);
	}

	/*
	 * Check for a completed resilver with the 'unspare' flag set.
	 */
	if (vd->vdev_ops == &vdev_spare_ops) {
		vdev_t *first = vd->vdev_child[0];
		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];

		if (last->vdev_unspare) {
			oldvd = first;
			newvd = last;
		} else if (first->vdev_unspare) {
			oldvd = last;
			newvd = first;
		} else {
			oldvd = NULL;
		}

		if (oldvd != NULL &&
		    vdev_dtl_empty(newvd, DTL_MISSING) &&
		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
		    !vdev_dtl_required(oldvd))
			return (oldvd);

		/*
		 * If there are more than two spares attached to a disk,
		 * and those spares are not required, then we want to
		 * attempt to free them up now so that they can be used
		 * by other pools.  Once we're back down to a single
		 * disk+spare, we stop removing them.
		 */
		if (vd->vdev_children > 2) {
			newvd = vd->vdev_child[1];

			if (newvd->vdev_isspare && last->vdev_isspare &&
			    vdev_dtl_empty(last, DTL_MISSING) &&
			    vdev_dtl_empty(last, DTL_OUTAGE) &&
			    !vdev_dtl_required(newvd))
				return (newvd);
		}
	}

	return (NULL);
}

static void
spa_vdev_resilver_done(spa_t *spa)
{
	vdev_t *vd, *pvd, *ppvd;
	uint64_t guid, sguid, pguid, ppguid;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
		pvd = vd->vdev_parent;
		ppvd = pvd->vdev_parent;
		guid = vd->vdev_guid;
		pguid = pvd->vdev_guid;
		ppguid = ppvd->vdev_guid;
		sguid = 0;
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
		    ppvd->vdev_children == 2) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
			sguid = ppvd->vdev_child[1]->vdev_guid;
		}
		spa_config_exit(spa, SCL_ALL, FTAG);
		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
			return;
		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
			return;
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}
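
/*
 * Illustrative sketch, not part of the original source: one possible
 * vdev tree handled by the hot-spare case above.  Detach proceeds
 * bottom-up: first the replaced leaf (guid) comes out of the replacing
 * vdev (pguid), then the original hot spare (sguid) comes out of the
 * spare vdev (ppguid):
 *
 *	spare (ppvd)
 *	    replacing (pvd, vdev_id == 0)
 *	        old disk (vd)			detached first
 *	        new disk
 *	    hot spare (ppvd->vdev_child[1])	detached second
 */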

/*
 * Update the stored path or FRU for this vdev.
 */
int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
    boolean_t ispath)
{
	vdev_t *vd;
	boolean_t sync = B_FALSE;

	ASSERT(spa_writeable(spa));

	spa_vdev_state_enter(spa, SCL_ALL);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENOENT));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	if (ispath) {
		if (strcmp(value, vd->vdev_path) != 0) {
			spa_strfree(vd->vdev_path);
			vd->vdev_path = spa_strdup(value);
			sync = B_TRUE;
		}
	} else {
		if (vd->vdev_fru == NULL) {
			vd->vdev_fru = spa_strdup(value);
			sync = B_TRUE;
		} else if (strcmp(value, vd->vdev_fru) != 0) {
			spa_strfree(vd->vdev_fru);
			vd->vdev_fru = spa_strdup(value);
			sync = B_TRUE;
		}
	}

	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}

int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}

int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
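
/*
 * Illustrative sketch, not part of the original source: updating the
 * stored path after a device node has been renamed, where "guid" is
 * assumed to identify a leaf vdev:
 *
 *	error = spa_vdev_setpath(spa, guid, "/dev/disk/by-id/newname");
 *
 * ENOENT means no vdev has that guid; ENOTSUP means the vdev is not
 * a leaf.
 */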

/*
 * ==========================================================================
 * SPA Scanning
 * ==========================================================================
 */

int
spa_scan_stop(spa_t *spa)
{
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
	if (dsl_scan_resilvering(spa->spa_dsl_pool))
		return (EBUSY);
	return (dsl_scan_cancel(spa->spa_dsl_pool));
}

int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);

	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
		return (ENOTSUP);

	/*
	 * If a resilver was requested, but there is no DTL on a
	 * writeable leaf device, we have nothing to do.
	 */
	if (func == POOL_SCAN_RESILVER &&
	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
		return (0);
	}

	return (dsl_scan(spa->spa_dsl_pool, func));
}
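
/*
 * Illustrative sketch, not part of the original source: starting and
 * then cancelling a scrub.  POOL_SCAN_SCRUB is one of the valid
 * pool_scan_func_t values checked above:
 *
 *	error = spa_scan(spa, POOL_SCAN_SCRUB);
 *	...
 *	error = spa_scan_stop(spa);	(EBUSY while resilvering)
 */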

/*
 * ==========================================================================
 * SPA async task processing
 * ==========================================================================
 */

static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd->vdev_remove_wanted) {
		vd->vdev_remove_wanted = B_FALSE;
		vd->vdev_delayed_close = B_FALSE;
		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);

		/*
		 * We want to clear the stats, but we don't want to do a full
		 * vdev_clear() as that will cause us to throw away
		 * degraded/faulted state as well as attempt to reopen the
		 * device, all of which is a waste.
		 */
		vd->vdev_stat.vs_read_errors = 0;
		vd->vdev_stat.vs_write_errors = 0;
		vd->vdev_stat.vs_checksum_errors = 0;

		vdev_state_dirty(vd->vdev_top);
	}

	for (c = 0; c < vd->vdev_children; c++)
		spa_async_remove(spa, vd->vdev_child[c]);
}

static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd->vdev_probe_wanted) {
		vd->vdev_probe_wanted = B_FALSE;
		vdev_reopen(vd);	/* vdev_open() does the actual probe */
	}

	for (c = 0; c < vd->vdev_children; c++)
		spa_async_probe(spa, vd->vdev_child[c]);
}

static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
	int c;

	if (!spa->spa_autoexpand)
		return;

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];
		spa_async_autoexpand(spa, cvd);
	}

	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
		return;

	spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_AUTOEXPAND);
}

static void
spa_async_thread(spa_t *spa)
{
	int tasks, i;

	ASSERT(spa->spa_sync_on);

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		uint64_t old_space, new_space;

		mutex_enter(&spa_namespace_lock);
		old_space = metaslab_class_get_space(spa_normal_class(spa));
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		new_space = metaslab_class_get_space(spa_normal_class(spa));
		mutex_exit(&spa_namespace_lock);

		/*
		 * If the pool grew as a result of the config update,
		 * then log an internal history event.
		 */
		if (new_space != old_space) {
			spa_history_log_internal(spa, "vdev online", NULL,
			    "pool '%s' size: %llu(+%llu)",
			    spa_name(spa), new_space, new_space - old_space);
		}
	}

	/*
	 * See if any devices need to be marked REMOVED.
	 */
	if (tasks & SPA_ASYNC_REMOVE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_remove(spa, spa->spa_root_vdev);
		for (i = 0; i < spa->spa_l2cache.sav_count; i++)
			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
		for (i = 0; i < spa->spa_spares.sav_count; i++)
			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		spa_async_autoexpand(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
	}

	/*
	 * See if any devices need to be probed.
	 */
	if (tasks & SPA_ASYNC_PROBE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_probe(spa, spa->spa_root_vdev);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		dsl_resilver_restart(spa->spa_dsl_pool, 0);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}

void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}

static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL &&
	    rootdir != NULL && !vn_is_readonly(rootdir))
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_request(spa_t *spa, int task)
{
	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
}
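
/*
 * Illustrative sketch, not part of the original source: requesting an
 * async resilver.  The request only sets a bit in spa_async_tasks;
 * spa_async_dispatch(), called at the end of spa_sync(), creates the
 * worker thread that services it:
 *
 *	spa_async_request(spa, SPA_ASYNC_RESILVER);
 */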

/*
 * ==========================================================================
 * SPA syncing routines
 * ==========================================================================
 */

static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	bpobj_t *bpo = arg;
	bpobj_enqueue(bpo, bp, tx);
	return (0);
}

static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	zio_t *zio = arg;

	zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
	    zio->io_flags));
	return (0);
}

static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	char *packed = NULL;
	size_t bufsize;
	size_t nvsize = 0;
	dmu_buf_t *db;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);

	/*
	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
	 * information.  This avoids the dbuf_will_dirty() path and
	 * saves us a pre-read to get data we don't actually care about.
	 */
	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
	packed = vmem_alloc(bufsize, KM_PUSHPAGE);

	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_PUSHPAGE) == 0);
	bzero(packed + nvsize, bufsize - nvsize);

	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);

	vmem_free(packed, bufsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = nvsize;
	dmu_buf_rele(db, FTAG);
}

static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	if (sav->sav_count == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, VDEV_CONFIG_L2CACHE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}

static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	if (list_is_empty(&spa->spa_config_dirty_list))
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	config = spa_config_generate(spa, spa->spa_root_vdev,
	    dmu_tx_get_txg(tx), B_FALSE);

	/*
	 * If we're upgrading the spa version then make sure that
	 * the config object gets updated with the correct version.
	 */
	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
		    spa->spa_uberblock.ub_version);

	spa_config_exit(spa, SCL_STATE, FTAG);

	if (spa->spa_config_syncing)
		nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}

static void
spa_sync_version(void *arg1, void *arg2, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	uint64_t version = *(uint64_t *)arg2;

	/*
	 * Setting the version is special cased when first creating the pool.
	 */
	ASSERT(tx->tx_txg != TXG_INITIAL);

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	ASSERT(version >= spa_version(spa));

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);
	spa_history_log_internal(spa, "set", tx, "version=%lld", version);
}

/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	objset_t *mos = spa->spa_meta_objset;
	nvlist_t *nvp = arg2;
	nvpair_t *elem = NULL;

	mutex_enter(&spa->spa_props_lock);

	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		uint64_t intval;
		char *strval, *fname;
		zpool_prop_t prop;
		const char *propname;
		zprop_type_t proptype;
		zfeature_info_t *feature;

		prop = zpool_name_to_prop(nvpair_name(elem));
		switch ((int)prop) {
		case ZPROP_INVAL:
			/*
			 * We checked this earlier in spa_prop_validate().
			 */
			ASSERT(zpool_prop_feature(nvpair_name(elem)));

			fname = strchr(nvpair_name(elem), '@') + 1;
			VERIFY3U(0, ==, zfeature_lookup_name(fname, &feature));

			spa_feature_enable(spa, feature, tx);
			spa_history_log_internal(spa, "set", tx,
			    "%s=enabled", nvpair_name(elem));
			break;

		case ZPOOL_PROP_VERSION:
			VERIFY(nvpair_value_uint64(elem, &intval) == 0);
			/*
			 * The version is synced separately before other
			 * properties and should be correct by now.
			 */
			ASSERT3U(spa_version(spa), >=, intval);
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_READONLY:
		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'readonly' and 'cachefile' are also non-persistent
			 * properties.
			 */
			break;
		case ZPOOL_PROP_COMMENT:
			VERIFY(nvpair_value_string(elem, &strval) == 0);
			if (spa->spa_comment != NULL)
				spa_strfree(spa->spa_comment);
			spa->spa_comment = spa_strdup(strval);
			/*
			 * We need to dirty the configuration on all the vdevs
			 * so that their labels get updated.  It's unnecessary
			 * to do this for pool creation since the vdev's
			 * configuration has already been dirtied.
			 */
			if (tx->tx_txg != TXG_INITIAL)
				vdev_config_dirty(spa->spa_root_vdev);
			spa_history_log_internal(spa, "set", tx,
			    "%s=%s", nvpair_name(elem), strval);
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 */
			if (spa->spa_pool_props_object == 0) {
				spa->spa_pool_props_object =
				    zap_create_link(mos, DMU_OT_POOL_PROPS,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    tx);
			}

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				VERIFY(nvpair_value_string(elem, &strval) == 0);
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx) == 0);
				spa_history_log_internal(spa, "set", tx,
				    "%s=%s", nvpair_name(elem), strval);
			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				VERIFY(nvpair_value_uint64(elem, &intval) == 0);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY(zpool_prop_index_to_string(
					    prop, intval, &unused) == 0);
				}
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx) == 0);
				spa_history_log_internal(spa, "set", tx,
				    "%s=%lld", nvpair_name(elem), intval);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			case ZPOOL_PROP_AUTOEXPAND:
				spa->spa_autoexpand = intval;
				if (tx->tx_txg != TXG_INITIAL)
					spa_async_request(spa,
					    SPA_ASYNC_AUTOEXPAND);
				break;
			case ZPOOL_PROP_DEDUPDITTO:
				spa->spa_dedup_ditto = intval;
				break;
			default:
				break;
			}
		}
	}

	mutex_exit(&spa->spa_props_lock);
}

/*
 * Perform one-time upgrade on-disk changes.  spa_version() does not
 * reflect the new version this txg, so there must be no changes this
 * txg to anything that the upgrade code depends on after it executes.
 * Therefore this must be called after dsl_pool_sync() does the sync
 * tasks.
 */
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;

	ASSERT(spa->spa_sync_pass == 1);

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
		dsl_pool_upgrade_dir_clones(dp, tx);

		/* Keeping the freedir open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		spa_feature_create_zap_objects(spa, tx);
	}
}

/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bpobj_t *defer_bpo = &spa->spa_deferred_bpobj;
	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int error;
	int c;

	VERIFY(spa_writeable(spa));

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	tx = dmu_tx_create_assigned(dp, txg);

	spa->spa_sync_starttime = gethrtime();
	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    NSEC_TO_TICK(spa->spa_deadman_synctime));

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * If anything has changed in this txg, or if someone is waiting
	 * for this txg to sync (eg, spa_vdev_remove()), push the
	 * deferred frees from the previous txg.  If not, leave them
	 * alone so that we don't generate work on an otherwise idle
	 * system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg) ||
	    ((dsl_scan_active(dp->dp_scan) ||
	    txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
		zio_t *zio = zio_root(spa, NULL, NULL, 0);
		VERIFY3U(bpobj_iterate(defer_bpo,
		    spa_free_sync_cb, zio, tx), ==, 0);
		VERIFY0(zio_wait(zio));
	}

	/*
	 * Iterate to convergence.
	 */
	do {
		int pass = ++spa->spa_sync_pass;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		if (pass < zfs_sync_pass_deferred_free) {
			zio_t *zio = zio_root(spa, NULL, NULL, 0);
			bplist_iterate(free_bpl, spa_free_sync_cb,
			    zio, tx);
			VERIFY(zio_wait(zio) == 0);
		} else {
			bplist_iterate(free_bpl, bpobj_enqueue_cb,
			    defer_bpo, tx);
		}

		ddt_sync(spa, txg);
		dsl_scan_sync(dp, tx);

		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
			vdev_sync(vd, txg);

		if (pass == 1)
			spa_sync_upgrades(spa, tx);

	} while (dmu_objset_is_dirty(mos, txg));

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for a complete description).
	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
	 */
	for (;;) {
		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_DVAS_PER_BP];
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);

			for (c = 0; c < children; c++) {
				vd = rvd->vdev_child[(c0 + c) % children];
				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
					continue;
				svd[svdcount++] = vd;
				if (svdcount == SPA_DVAS_PER_BP)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
			if (error != 0)
				error = vdev_config_sync(svd, svdcount, txg,
				    B_TRUE);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg, B_FALSE);
			if (error != 0)
				error = vdev_config_sync(rvd->vdev_child,
				    rvd->vdev_children, txg, B_TRUE);
		}

		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		zio_suspend(spa, NULL);
		zio_resume_wait(spa);
	}
	dmu_tx_commit(tx);

	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = 0;

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_ubsync = spa->spa_uberblock;

	dsl_pool_sync_done(dp, txg);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
		vdev_sync_done(vd, txg);

	spa_update_dspace(spa);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));

	spa->spa_sync_pass = 0;

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	spa_handle_ignored_writes(spa);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
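
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller upgrading a pool to the newest supported on-disk version.
 * The assertions above require that the requested version is supported
 * and not older than the current one:
 *
 *	if (spa_version(spa) < SPA_VERSION)
 *		spa_upgrade(spa, SPA_VERSION);
 */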

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a FM_EREPORT_ZFS_* event from sys/fm/fs/zfs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	zfs_ereport_post(name, spa, vd, NULL, 0, 0);
#endif
}
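
/*
 * Illustrative sketch, not part of the original source: posting an
 * autoexpand event for a leaf vdev, as spa_async_autoexpand() does
 * above.  In userland builds this is a no-op:
 *
 *	spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_AUTOEXPAND);
 */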

#if defined(_KERNEL) && defined(HAVE_SPL)
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import_rootpool);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);

/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);

/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);

/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);

/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);

/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);

/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);

/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
#endif