diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 868a0d9d2..7052eec4a 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -21,9 +21,13 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  */
 
 /*
+ * SPA: Storage Pool Allocator
+ *
  * This file contains all the routines used when modifying on-disk SPA state.
  * This includes opening, importing, destroying, exporting a pool, and syncing a
  * pool.
@@ -61,6 +65,9 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
 
 #ifdef _KERNEL
 #include 
@@ -75,23 +82,24 @@
 #include "zfs_comutil.h"
 
 typedef enum zti_modes {
-	zti_mode_fixed,			/* value is # of threads (min 1) */
-	zti_mode_online_percent,	/* value is % of online CPUs */
-	zti_mode_batch,			/* cpu-intensive; value is ignored */
-	zti_mode_null,			/* don't create a taskq */
-	zti_nmodes
+	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
+	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
+	ZTI_MODE_NULL,			/* don't create a taskq */
+	ZTI_NMODES
 } zti_modes_t;
 
-#define	ZTI_FIX(n)	{ zti_mode_fixed, (n) }
-#define	ZTI_PCT(n)	{ zti_mode_online_percent, (n) }
-#define	ZTI_BATCH	{ zti_mode_batch, 0 }
-#define	ZTI_NULL	{ zti_mode_null, 0 }
+#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
+#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
+#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
+#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }
 
-#define	ZTI_ONE		ZTI_FIX(1)
+#define	ZTI_N(n)	ZTI_P(n, 1)
+#define	ZTI_ONE		ZTI_N(1)
 
 typedef struct zio_taskq_info {
-	enum zti_modes zti_mode;
+	zti_modes_t zti_mode;
 	uint_t zti_value;
+	uint_t zti_count;
 } zio_taskq_info_t;
 
 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
@@ -99,27 +107,41 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
 };
 
 /*
- * Define the taskq threads for the following I/O types:
- * NULL, READ, WRITE, FREE, CLAIM, and IOCTL
+ * This table defines the taskq settings for each ZFS I/O type. When
+ * initializing a pool, we use this table to create an appropriately sized
+ * taskq. Some operations are low volume and therefore have a small, static
+ * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
+ * macros. Other operations process a large amount of data; the ZTI_BATCH
+ * macro causes us to create a taskq oriented for throughput. Some operations
+ * are so high frequency and short-lived that the taskq itself can become a
+ * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
+ * additional degree of parallelism specified by the number of threads per-
+ * taskq and the number of taskqs; when dispatching an event in this case, the
+ * particular taskq is chosen at random.
+ *
+ * The different taskq priorities are to handle the different contexts (issue
+ * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
+ * need to be handled with minimum delay.
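+ *
+ * For example (an illustrative reading of the table below, using the
+ * macros defined above): the FREE issue entry ZTI_P(4, 8) expands to
+ * { ZTI_MODE_FIXED, 4, 8 }, i.e. eight discrete taskqs with four threads
+ * each, while the READ issue entry ZTI_N(8) expands to ZTI_P(8, 1), a
+ * single taskq with eight threads.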
*/ const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { /* ISSUE ISSUE_HIGH INTR INTR_HIGH */ - { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, - { ZTI_FIX(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL }, - { ZTI_BATCH, ZTI_FIX(5), ZTI_FIX(16), ZTI_FIX(5) }, - { ZTI_FIX(100), ZTI_NULL, ZTI_ONE, ZTI_NULL }, - { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, - { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, + { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */ + { ZTI_N(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL }, /* READ */ + { ZTI_BATCH, ZTI_N(5), ZTI_N(16), ZTI_N(5) }, /* WRITE */ + { ZTI_P(4, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */ + { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */ + { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */ }; -static dsl_syncfunc_t spa_sync_props; +static void spa_sync_version(void *arg, dmu_tx_t *tx); +static void spa_sync_props(void *arg, dmu_tx_t *tx); static boolean_t spa_has_active_shared_spare(spa_t *spa); static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config, spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, char **ereport); static void spa_vdev_resilver_done(spa_t *spa); -uint_t zio_taskq_batch_pct = 100; /* 1 thread per cpu in pset */ +uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */ id_t zio_taskq_psrset_bind = PS_NONE; boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */ uint_t zio_taskq_basedc = 80; /* base duty cycle */ @@ -148,7 +170,7 @@ spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, const char *propname = zpool_prop_to_name(prop); nvlist_t *propval; - VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0); VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); if (strval != NULL) @@ -166,15 +188,19 @@ spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, static void spa_prop_get_config(spa_t *spa, nvlist_t **nvp) { + vdev_t *rvd = spa->spa_root_vdev; + dsl_pool_t *pool = spa->spa_dsl_pool; uint64_t size; uint64_t alloc; + uint64_t space; uint64_t cap, version; zprop_source_t src = ZPROP_SRC_NONE; spa_config_dirent_t *dp; + int c; ASSERT(MUTEX_HELD(&spa->spa_props_lock)); - if (spa->spa_root_vdev != NULL) { + if (rvd != NULL) { alloc = metaslab_class_get_alloc(spa_normal_class(spa)); size = metaslab_class_get_space(spa_normal_class(spa)); spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src); @@ -182,6 +208,15 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp) spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src); spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL, size - alloc, src); + + space = 0; + for (c = 0; c < rvd->vdev_children; c++) { + vdev_t *tvd = rvd->vdev_child[c]; + space += tvd->vdev_max_asize - tvd->vdev_asize; + } + spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space, + src); + spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL, (spa_mode(spa) == FREAD), src); @@ -192,7 +227,7 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp) ddt_get_pool_dedup_ratio(spa), src); spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, - spa->spa_root_vdev->vdev_state, src); + rvd->vdev_state, src); version = spa_version(spa); if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) @@ -202,8 +237,29 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp) spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src); } + if (pool != NULL) { + dsl_dir_t *freedir = pool->dp_free_dir; + + /* + * The $FREE directory was introduced in SPA_VERSION_DEADLISTS, + * when opening 
pools before this version freedir will be NULL. + */ + if (freedir != NULL) { + spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL, + freedir->dd_phys->dd_used_bytes, src); + } else { + spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, + NULL, 0, src); + } + } + spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); + if (spa->spa_comment != NULL) { + spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment, + 0, ZPROP_SRC_LOCAL); + } + if (spa->spa_root != NULL) spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, 0, ZPROP_SRC_LOCAL); @@ -230,9 +286,9 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp) zap_attribute_t za; int err; - err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP); + err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE); if (err) - return err; + return (err); mutex_enter(&spa->spa_props_lock); @@ -273,19 +329,19 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp) dsl_dataset_t *ds = NULL; dp = spa_get_dsl(spa); - rw_enter(&dp->dp_config_rwlock, RW_READER); + dsl_pool_config_enter(dp, FTAG); if ((err = dsl_dataset_hold_obj(dp, za.za_first_integer, FTAG, &ds))) { - rw_exit(&dp->dp_config_rwlock); + dsl_pool_config_exit(dp, FTAG); break; } strval = kmem_alloc( MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, - KM_SLEEP); + KM_PUSHPAGE); dsl_dataset_name(ds, strval); dsl_dataset_rele(ds, FTAG); - rw_exit(&dp->dp_config_rwlock); + dsl_pool_config_exit(dp, FTAG); } else { strval = NULL; intval = za.za_first_integer; @@ -301,7 +357,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp) case 1: /* string property */ - strval = kmem_alloc(za.za_num_integers, KM_SLEEP); + strval = kmem_alloc(za.za_num_integers, KM_PUSHPAGE); err = zap_lookup(mos, spa->spa_pool_props_object, za.za_name, 1, za.za_num_integers, strval); if (err) { @@ -338,26 +394,56 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) nvpair_t *elem; int error = 0, reset_bootfs = 0; uint64_t objnum = 0; + boolean_t has_feature = B_FALSE; elem = NULL; while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { - zpool_prop_t prop; - char *propname, *strval; uint64_t intval; - objset_t *os; - char *slash; + char *strval, *slash, *check, *fname; + const char *propname = nvpair_name(elem); + zpool_prop_t prop = zpool_name_to_prop(propname); + + switch ((int)prop) { + case ZPROP_INVAL: + if (!zpool_prop_feature(propname)) { + error = SET_ERROR(EINVAL); + break; + } + + /* + * Sanitize the input. 
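+			 *
+			 * For example (illustrative): a request to enable a
+			 * feature arrives here as an nvpair such as
+			 * ("feature@async_destroy", (uint64)0). Anything that
+			 * is not a known feature name carrying a uint64 value
+			 * of zero is rejected below.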
+ */ + if (nvpair_type(elem) != DATA_TYPE_UINT64) { + error = SET_ERROR(EINVAL); + break; + } + + if (nvpair_value_uint64(elem, &intval) != 0) { + error = SET_ERROR(EINVAL); + break; + } + + if (intval != 0) { + error = SET_ERROR(EINVAL); + break; + } - propname = nvpair_name(elem); + fname = strchr(propname, '@') + 1; + if (zfeature_lookup_name(fname, NULL) != 0) { + error = SET_ERROR(EINVAL); + break; + } - if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) - return (EINVAL); + has_feature = B_TRUE; + break; - switch (prop) { case ZPOOL_PROP_VERSION: error = nvpair_value_uint64(elem, &intval); if (!error && - (intval < spa_version(spa) || intval > SPA_VERSION)) - error = EINVAL; + (intval < spa_version(spa) || + intval > SPA_VERSION_BEFORE_FEATURES || + has_feature)) + error = SET_ERROR(EINVAL); break; case ZPOOL_PROP_DELEGATION: @@ -366,7 +452,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) case ZPOOL_PROP_AUTOEXPAND: error = nvpair_value_uint64(elem, &intval); if (!error && intval > 1) - error = EINVAL; + error = SET_ERROR(EINVAL); break; case ZPOOL_PROP_BOOTFS: @@ -376,7 +462,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) * the bootfs property cannot be set. */ if (spa_version(spa) < SPA_VERSION_BOOTFS) { - error = ENOTSUP; + error = SET_ERROR(ENOTSUP); break; } @@ -384,7 +470,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) * Make sure the vdev config is bootable */ if (!vdev_is_bootable(spa->spa_root_vdev)) { - error = ENOTSUP; + error = SET_ERROR(ENOTSUP); break; } @@ -393,6 +479,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) error = nvpair_value_string(elem, &strval); if (!error) { + objset_t *os; uint64_t compress; if (strval == NULL || strval[0] == '\0') { @@ -401,18 +488,20 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) break; } - if ((error = dmu_objset_hold(strval,FTAG,&os))) + error = dmu_objset_hold(strval, FTAG, &os); + if (error) break; /* Must be ZPL and not gzip compressed. 
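+				 * (Expanded note, not part of the original
+				 * patch: the checks below require the dataset
+				 * type to be DMU_OST_ZFS and its compression
+				 * to pass BOOTFS_COMPRESS_VALID(), since the
+				 * boot loader must be able to read the root
+				 * dataset and cannot handle gzip.)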
 				 */
 			if (dmu_objset_type(os) != DMU_OST_ZFS) {
-				error = ENOTSUP;
-			} else if ((error = dsl_prop_get_integer(strval,
+				error = SET_ERROR(ENOTSUP);
+			} else if ((error =
+			    dsl_prop_get_int_ds(dmu_objset_ds(os),
 			    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
-			    &compress, NULL)) == 0 &&
+			    &compress)) == 0 &&
 			    !BOOTFS_COMPRESS_VALID(compress)) {
-				error = ENOTSUP;
+				error = SET_ERROR(ENOTSUP);
 			} else {
 				objnum = dmu_objset_id(os);
 			}
@@ -424,7 +513,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 			error = nvpair_value_uint64(elem, &intval);
 			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
 			    intval > ZIO_FAILURE_MODE_PANIC))
-				error = EINVAL;
+				error = SET_ERROR(EINVAL);
 
 			/*
 			 * This is a special case which only occurs when
@@ -438,7 +527,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 			 */
 			if (!error && spa_suspended(spa)) {
 				spa->spa_failmode = intval;
-				error = EIO;
+				error = SET_ERROR(EIO);
 			}
 			break;
 
@@ -453,7 +542,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 				break;
 
 			if (strval[0] != '/') {
-				error = EINVAL;
+				error = SET_ERROR(EINVAL);
 				break;
 			}
 
@@ -462,17 +551,30 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
 			    strcmp(slash, "/..") == 0)
-				error = EINVAL;
+				error = SET_ERROR(EINVAL);
+			break;
+
+		case ZPOOL_PROP_COMMENT:
+			if ((error = nvpair_value_string(elem, &strval)) != 0)
+				break;
+			for (check = strval; *check != '\0'; check++) {
+				if (!isprint(*check)) {
+					error = SET_ERROR(EINVAL);
+					break;
+				}
+			}
+			if (strlen(strval) > ZPROP_MAX_COMMENT)
+				error = SET_ERROR(E2BIG);
 			break;
 
 		case ZPOOL_PROP_DEDUPDITTO:
 			if (spa_version(spa) < SPA_VERSION_DEDUP)
-				error = ENOTSUP;
+				error = SET_ERROR(ENOTSUP);
 			else
 				error = nvpair_value_uint64(elem, &intval);
 			if (error == 0 &&
 			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
-				error = EINVAL;
+				error = SET_ERROR(EINVAL);
 			break;
 
 		default:
@@ -507,7 +610,7 @@ spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
 		return;
 
 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
-	    KM_SLEEP);
+	    KM_PUSHPAGE);
 
 	if (cachefile[0] == '\0')
 		dp->scd_path = spa_strdup(spa_config_path);
@@ -525,33 +628,58 @@ int
 spa_prop_set(spa_t *spa, nvlist_t *nvp)
 {
 	int error;
-	nvpair_t *elem;
+	nvpair_t *elem = NULL;
 	boolean_t need_sync = B_FALSE;
-	zpool_prop_t prop;
 
 	if ((error = spa_prop_validate(spa, nvp)) != 0)
 		return (error);
 
-	elem = NULL;
 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
-		if ((prop = zpool_name_to_prop(
-		    nvpair_name(elem))) == ZPROP_INVAL)
-			return (EINVAL);
+		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
 
 		if (prop == ZPOOL_PROP_CACHEFILE ||
 		    prop == ZPOOL_PROP_ALTROOT ||
 		    prop == ZPOOL_PROP_READONLY)
 			continue;
 
+		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
+			uint64_t ver;
+
+			if (prop == ZPOOL_PROP_VERSION) {
+				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
+			} else {
+				ASSERT(zpool_prop_feature(nvpair_name(elem)));
+				ver = SPA_VERSION_FEATURES;
+				need_sync = B_TRUE;
+			}
+
+			/* Save time if the version is already set. */
+			if (ver == spa_version(spa))
+				continue;
+
+			/*
+			 * In addition to the pool directory object, we might
+			 * create the pool properties object, the features for
+			 * read object, the features for write object, or the
+			 * feature descriptions object.
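+			 *
+			 * Sketch of the call below (annotation, not part of
+			 * the original patch): the trailing argument (6) is
+			 * the estimated number of blocks modified, which the
+			 * DSL uses to reserve space for the sync task; it
+			 * covers the objects named above plus some slack.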
+ */ + error = dsl_sync_task(spa->spa_name, NULL, + spa_sync_version, &ver, 6); + if (error) + return (error); + continue; + } + need_sync = B_TRUE; break; } - if (need_sync) - return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props, - spa, nvp, 3)); - else - return (0); + if (need_sync) { + return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props, + nvp, 6)); + } + + return (0); } /* @@ -568,6 +696,80 @@ spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) } } +/*ARGSUSED*/ +static int +spa_change_guid_check(void *arg, dmu_tx_t *tx) +{ + spa_t *spa = dmu_tx_pool(tx)->dp_spa; + vdev_t *rvd = spa->spa_root_vdev; + uint64_t vdev_state; + ASSERTV(uint64_t *newguid = arg); + + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + vdev_state = rvd->vdev_state; + spa_config_exit(spa, SCL_STATE, FTAG); + + if (vdev_state != VDEV_STATE_HEALTHY) + return (SET_ERROR(ENXIO)); + + ASSERT3U(spa_guid(spa), !=, *newguid); + + return (0); +} + +static void +spa_change_guid_sync(void *arg, dmu_tx_t *tx) +{ + uint64_t *newguid = arg; + spa_t *spa = dmu_tx_pool(tx)->dp_spa; + uint64_t oldguid; + vdev_t *rvd = spa->spa_root_vdev; + + oldguid = spa_guid(spa); + + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + rvd->vdev_guid = *newguid; + rvd->vdev_guid_sum += (*newguid - oldguid); + vdev_config_dirty(rvd); + spa_config_exit(spa, SCL_STATE, FTAG); + + spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu", + oldguid, *newguid); +} + +/* + * Change the GUID for the pool. This is done so that we can later + * re-import a pool built from a clone of our own vdevs. We will modify + * the root vdev's guid, our own pool guid, and then mark all of our + * vdevs dirty. Note that we must make sure that all our vdevs are + * online when we do this, or else any vdevs that weren't present + * would be orphaned from our pool. We are also going to issue a + * sysevent to update any watchers. 
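+ *
+ * (Usage note added for clarity: this is the path exercised by
+ * "zpool reguid". The new guid is generated below and committed through
+ * the spa_change_guid_check()/spa_change_guid_sync() pair above, run as
+ * a single sync task.)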
+ */ +int +spa_change_guid(spa_t *spa) +{ + int error; + uint64_t guid; + + mutex_enter(&spa->spa_vdev_top_lock); + mutex_enter(&spa_namespace_lock); + guid = spa_generate_guid(NULL); + + error = dsl_sync_task(spa->spa_name, spa_change_guid_check, + spa_change_guid_sync, &guid, 5); + + if (error == 0) { + spa_config_sync(spa, B_FALSE, B_TRUE); + spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_REGUID); + } + + mutex_exit(&spa_namespace_lock); + mutex_exit(&spa->spa_vdev_top_lock); + + return (error); +} + /* * ========================================================================== * SPA state manipulation (open/create/destroy/import/export) @@ -612,47 +814,151 @@ spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) offsetof(spa_error_entry_t, se_avl)); } -static taskq_t * -spa_taskq_create(spa_t *spa, const char *name, enum zti_modes mode, - uint_t value, uint_t flags) +static void +spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) { + const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; + enum zti_modes mode = ztip->zti_mode; + uint_t value = ztip->zti_value; + uint_t count = ztip->zti_count; + spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; + char name[32]; + uint_t i, flags = 0; boolean_t batch = B_FALSE; - switch (mode) { - case zti_mode_null: - return (NULL); /* no taskq needed */ + if (mode == ZTI_MODE_NULL) { + tqs->stqs_count = 0; + tqs->stqs_taskq = NULL; + return; + } + + ASSERT3U(count, >, 0); + + tqs->stqs_count = count; + tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP); - case zti_mode_fixed: + switch (mode) { + case ZTI_MODE_FIXED: ASSERT3U(value, >=, 1); value = MAX(value, 1); break; - case zti_mode_batch: + case ZTI_MODE_BATCH: batch = B_TRUE; flags |= TASKQ_THREADS_CPU_PCT; value = zio_taskq_batch_pct; break; - case zti_mode_online_percent: - flags |= TASKQ_THREADS_CPU_PCT; - break; - default: - panic("unrecognized mode for %s taskq (%u:%u) in " + panic("unrecognized mode for %s_%s taskq (%u:%u) in " "spa_activate()", - name, mode, value); + zio_type_name[t], zio_taskq_types[q], mode, value); break; } - if (zio_taskq_sysdc && spa->spa_proc != &p0) { - if (batch) - flags |= TASKQ_DC_BATCH; + for (i = 0; i < count; i++) { + taskq_t *tq; + + if (count > 1) { + (void) snprintf(name, sizeof (name), "%s_%s_%u", + zio_type_name[t], zio_taskq_types[q], i); + } else { + (void) snprintf(name, sizeof (name), "%s_%s", + zio_type_name[t], zio_taskq_types[q]); + } + + if (zio_taskq_sysdc && spa->spa_proc != &p0) { + if (batch) + flags |= TASKQ_DC_BATCH; + + tq = taskq_create_sysdc(name, value, 50, INT_MAX, + spa->spa_proc, zio_taskq_basedc, flags); + } else { + pri_t pri = maxclsyspri; + /* + * The write issue taskq can be extremely CPU + * intensive. Run it at slightly lower priority + * than the other taskqs. + */ + if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) + pri--; + + tq = taskq_create_proc(name, value, pri, 50, + INT_MAX, spa->spa_proc, flags); + } + + tqs->stqs_taskq[i] = tq; + } +} + +static void +spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q) +{ + spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; + uint_t i; + + if (tqs->stqs_taskq == NULL) { + ASSERT3U(tqs->stqs_count, ==, 0); + return; + } + + for (i = 0; i < tqs->stqs_count; i++) { + ASSERT3P(tqs->stqs_taskq[i], !=, NULL); + taskq_destroy(tqs->stqs_taskq[i]); + } + + kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *)); + tqs->stqs_taskq = NULL; +} + +/* + * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority. 
+ * Note that a type may have multiple discrete taskqs to avoid lock contention + * on the taskq itself. In that case we choose which taskq at random by using + * the low bits of gethrtime(). + */ +void +spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q, + task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent) +{ + spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; + taskq_t *tq; + + ASSERT3P(tqs->stqs_taskq, !=, NULL); + ASSERT3U(tqs->stqs_count, !=, 0); + + if (tqs->stqs_count == 1) { + tq = tqs->stqs_taskq[0]; + } else { + tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count]; + } + + taskq_dispatch_ent(tq, func, arg, flags, ent); +} + +/* + * Same as spa_taskq_dispatch_ent() but block on the task until completion. + */ +void +spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q, + task_func_t *func, void *arg, uint_t flags) +{ + spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; + taskq_t *tq; + taskqid_t id; + + ASSERT3P(tqs->stqs_taskq, !=, NULL); + ASSERT3U(tqs->stqs_count, !=, 0); - return (taskq_create_sysdc(name, value, 50, INT_MAX, - spa->spa_proc, zio_taskq_basedc, flags)); + if (tqs->stqs_count == 1) { + tq = tqs->stqs_taskq[0]; + } else { + tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count]; } - return (taskq_create_proc(name, value, maxclsyspri, 50, INT_MAX, - spa->spa_proc, flags)); + + id = taskq_dispatch(tq, func, arg, flags); + if (id) + taskq_wait_id(tq, id); } static void @@ -662,20 +968,7 @@ spa_create_zio_taskqs(spa_t *spa) for (t = 0; t < ZIO_TYPES; t++) { for (q = 0; q < ZIO_TASKQ_TYPES; q++) { - const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; - enum zti_modes mode = ztip->zti_mode; - uint_t value = ztip->zti_value; - uint_t flags = TASKQ_PREPOPULATE; - char name[32]; - - if (t == ZIO_TYPE_WRITE) - flags |= TASKQ_NORECLAIM; - - (void) snprintf(name, sizeof (name), - "%s_%s", zio_type_name[t], zio_taskq_types[q]); - - spa->spa_zio_taskq[t][q] = - spa_taskq_create(spa, name, mode, value, flags); + spa_taskqs_init(spa, t, q); } } } @@ -834,11 +1127,11 @@ spa_deactivate(spa_t *spa) list_destroy(&spa->spa_config_dirty_list); list_destroy(&spa->spa_state_dirty_list); + taskq_cancel_id(system_taskq, spa->spa_deadman_tqid); + for (t = 0; t < ZIO_TYPES; t++) { for (q = 0; q < ZIO_TASKQ_TYPES; q++) { - if (spa->spa_zio_taskq[t][q] != NULL) - taskq_destroy(spa->spa_zio_taskq[t][q]); - spa->spa_zio_taskq[t][q] = NULL; + spa_taskqs_fini(spa, t, q); } } @@ -915,7 +1208,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, if (error) { vdev_free(*vdp); *vdp = NULL; - return (EINVAL); + return (SET_ERROR(EINVAL)); } for (c = 0; c < children; c++) { @@ -1004,8 +1297,10 @@ spa_unload(spa_t *spa) } spa->spa_spares.sav_count = 0; - for (i = 0; i < spa->spa_l2cache.sav_count; i++) + for (i = 0; i < spa->spa_l2cache.sav_count; i++) { + vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); vdev_free(spa->spa_l2cache.sav_vdevs[i]); + } if (spa->spa_l2cache.sav_vdevs) { kmem_free(spa->spa_l2cache.sav_vdevs, spa->spa_l2cache.sav_count * sizeof (void *)); @@ -1019,6 +1314,11 @@ spa_unload(spa_t *spa) spa->spa_async_suspended = 0; + if (spa->spa_comment != NULL) { + spa_strfree(spa->spa_comment); + spa->spa_comment = NULL; + } + spa_config_exit(spa, SCL_ALL, FTAG); } @@ -1078,7 +1378,7 @@ spa_load_spares(spa_t *spa) * active configuration, then we also mark this vdev as an active spare. 
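+	 *
+	 * (Illustration, assuming the usual spare bookkeeping: a spare that
+	 * has been swapped into a degraded top-level vdev appears both in
+	 * the spare list and in the active vdev tree, so it is flagged as an
+	 * active spare and reported as INUSE rather than AVAIL.)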
*/ spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *), - KM_SLEEP); + KM_PUSHPAGE); for (i = 0; i < spa->spa_spares.sav_count; i++) { VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, VDEV_ALLOC_SPARE) == 0); @@ -1126,7 +1426,7 @@ spa_load_spares(spa_t *spa) DATA_TYPE_NVLIST_ARRAY) == 0); spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), - KM_SLEEP); + KM_PUSHPAGE); for (i = 0; i < spa->spa_spares.sav_count; i++) spares[i] = vdev_config_generate(spa, spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); @@ -1152,7 +1452,7 @@ spa_load_l2cache(spa_t *spa) uint_t nl2cache; int i, j, oldnvdevs; uint64_t guid; - vdev_t *vd, **oldvdevs, **newvdevs = NULL; + vdev_t *vd, **oldvdevs, **newvdevs; spa_aux_vdev_t *sav = &spa->spa_l2cache; ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); @@ -1160,9 +1460,10 @@ spa_load_l2cache(spa_t *spa) if (sav->sav_config != NULL) { VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); - newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); + newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_PUSHPAGE); } else { nl2cache = 0; + newvdevs = NULL; } oldvdevs = sav->sav_vdevs; @@ -1228,11 +1529,13 @@ spa_load_l2cache(spa_t *spa) vd = oldvdevs[i]; if (vd != NULL) { + ASSERT(vd->vdev_isl2cache); + if (spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL && l2arc_vdev_present(vd)) l2arc_remove_vdev(vd); - (void) vdev_close(vd); - spa_l2cache_remove(vd); + vdev_clear_stats(vd); + vdev_free(vd); } } @@ -1252,7 +1555,7 @@ spa_load_l2cache(spa_t *spa) VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); - l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); + l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE); for (i = 0; i < sav->sav_count; i++) l2cache[i] = vdev_config_generate(spa, sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); @@ -1274,11 +1577,14 @@ load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) int error; *value = NULL; - VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); + error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); + if (error) + return (error); + nvsize = *(uint64_t *)db->db_data; dmu_buf_rele(db, FTAG); - packed = kmem_alloc(nvsize, KM_SLEEP | KM_NODEBUG); + packed = kmem_alloc(nvsize, KM_PUSHPAGE | KM_NODEBUG); error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, DMU_READ_PREFETCH); if (error == 0) @@ -1300,7 +1606,8 @@ spa_check_removed(vdev_t *vd) for (c = 0; c < vd->vdev_children; c++) spa_check_removed(vd->vdev_child[c]); - if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) { + if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && + !vd->vdev_ishole) { zfs_ereport_post(FM_EREPORT_RESOURCE_AUTOREPLACE, vd->vdev_spa, vd, NULL, 0, 0); spa_event_notify(vd->vdev_spa, vd, FM_EREPORT_ZFS_DEVICE_CHECK); @@ -1334,8 +1641,8 @@ spa_config_valid(spa_t *spa, nvlist_t *config) uint64_t idx = 0; child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **), - KM_SLEEP); - VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); + KM_PUSHPAGE); + VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0); for (c = 0; c < rvd->vdev_children; c++) { vdev_t *tvd = rvd->vdev_child[c]; @@ -1433,23 +1740,24 @@ spa_config_valid(spa_t *spa, nvlist_t *config) /* * Check for missing log devices */ -static int +static boolean_t spa_check_logs(spa_t *spa) { + boolean_t rv = B_FALSE; + switch (spa->spa_log_state) { default: break; case SPA_LOG_MISSING: /* need 
to recheck in case slog has been restored */ case SPA_LOG_UNKNOWN: - if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL, - DS_FIND_CHILDREN)) { + rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain, + NULL, DS_FIND_CHILDREN) != 0); + if (rv) spa_set_log_state(spa, SPA_LOG_MISSING); - return (1); - } break; } - return (0); + return (rv); } static boolean_t @@ -1497,11 +1805,11 @@ spa_activate_log(spa_t *spa) int spa_offline_log(spa_t *spa) { - int error = 0; - - if ((error = dmu_objset_find(spa_name(spa), zil_vdev_offline, - NULL, DS_FIND_CHILDREN)) == 0) { + int error; + error = dmu_objset_find(spa_name(spa), zil_vdev_offline, + NULL, DS_FIND_CHILDREN); + if (error == 0) { /* * We successfully offlined the log device, sync out the * current txg so that the "stubby" block can be removed @@ -1549,7 +1857,7 @@ spa_load_verify_done(zio_t *zio) int error = zio->io_error; if (error) { - if ((BP_GET_LEVEL(bp) != 0 || dmu_ot[type].ot_metadata) && + if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && type != DMU_OT_INTENT_LOG) atomic_add_64(&sle->sle_meta_count, 1); else @@ -1561,7 +1869,7 @@ spa_load_verify_done(zio_t *zio) /*ARGSUSED*/ static int spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, - arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) + const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) { if (bp != NULL) { zio_t *rio = arg; @@ -1622,7 +1930,7 @@ spa_load_verify(spa_t *spa) if (error) { if (error != ENXIO && error != EIO) - error = EIO; + error = SET_ERROR(EIO); return (error); } @@ -1690,7 +1998,7 @@ spa_try_repair(spa_t *spa, nvlist_t *config) &glist, &gcount) != 0) return; - vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); + vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_PUSHPAGE); /* attempt to online all the vdevs & validate */ attempt_reopen = B_TRUE; @@ -1744,12 +2052,17 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, { nvlist_t *config = spa->spa_config; char *ereport = FM_EREPORT_ZFS_POOL; + char *comment; int error; uint64_t pool_guid; nvlist_t *nvl; if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) - return (EINVAL); + return (SET_ERROR(EINVAL)); + + ASSERT(spa->spa_comment == NULL); + if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) + spa->spa_comment = spa_strdup(comment); /* * Versioning wasn't explicitly added to the label until later, so if @@ -1764,16 +2077,19 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && spa_guid_exists(pool_guid, 0)) { - error = EEXIST; + error = SET_ERROR(EEXIST); } else { - spa->spa_load_guid = pool_guid; + spa->spa_config_guid = pool_guid; if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) { VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, - KM_SLEEP) == 0); + KM_PUSHPAGE) == 0); } + nvlist_free(spa->spa_load_info); + spa->spa_load_info = fnvlist_alloc(); + gethrestime(&spa->spa_loaded_ts); error = spa_load_impl(spa, pool_guid, config, state, type, mosconfig, &ereport); @@ -1807,12 +2123,14 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, { int error = 0; nvlist_t *nvroot = NULL; + nvlist_t *label; vdev_t *rvd; uberblock_t *ub = &spa->spa_uberblock; uint64_t children, config_cache_txg = spa->spa_config_txg; int orig_mode = spa->spa_mode; int parse; uint64_t obj; + boolean_t missing_feat_write = B_FALSE; /* * If this is an untrusted config, access the pool in read-only mode. 
@@ -1826,7 +2144,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, spa->spa_load_state = state; if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) - return (EINVAL); + return (SET_ERROR(EINVAL)); parse = (type == SPA_IMPORT_EXISTING ? VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); @@ -1879,32 +2197,92 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, */ if (type != SPA_IMPORT_ASSEMBLE) { spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); - error = vdev_validate(rvd); + error = vdev_validate(rvd, mosconfig); spa_config_exit(spa, SCL_ALL, FTAG); if (error != 0) return (error); if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) - return (ENXIO); + return (SET_ERROR(ENXIO)); } /* * Find the best uberblock. */ - vdev_uberblock_load(NULL, rvd, ub); + vdev_uberblock_load(rvd, ub, &label); /* * If we weren't able to find a single valid uberblock, return failure. */ - if (ub->ub_txg == 0) + if (ub->ub_txg == 0) { + nvlist_free(label); return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); + } /* - * If the pool is newer than the code, we can't open it. + * If the pool has an unsupported version we can't open it. */ - if (ub->ub_version > SPA_VERSION) + if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { + nvlist_free(label); return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); + } + + if (ub->ub_version >= SPA_VERSION_FEATURES) { + nvlist_t *features; + + /* + * If we weren't able to find what's necessary for reading the + * MOS in the label, return failure. + */ + if (label == NULL || nvlist_lookup_nvlist(label, + ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { + nvlist_free(label); + return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, + ENXIO)); + } + + /* + * Update our in-core representation with the definitive values + * from the label. + */ + nvlist_free(spa->spa_label_features); + VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); + } + + nvlist_free(label); + + /* + * Look through entries in the label nvlist's features_for_read. If + * there is a feature listed there which we don't understand then we + * cannot open a pool. 
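+	 *
+	 * For example (illustrative): if the label's features_for_read set
+	 * names "com.delphix:async_destroy" and this code does not
+	 * recognize that guid, the entry is collected into the
+	 * ZPOOL_CONFIG_UNSUP_FEAT nvlist below and the open fails with
+	 * ENOTSUP.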
+ */ + if (ub->ub_version >= SPA_VERSION_FEATURES) { + nvlist_t *unsup_feat; + nvpair_t *nvp; + + VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == + 0); + + for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL); + nvp != NULL; + nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { + if (!zfeature_is_supported(nvpair_name(nvp))) { + VERIFY(nvlist_add_string(unsup_feat, + nvpair_name(nvp), "") == 0); + } + } + + if (!nvlist_empty(unsup_feat)) { + VERIFY(nvlist_add_nvlist(spa->spa_load_info, + ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); + nvlist_free(unsup_feat); + return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, + ENOTSUP)); + } + + nvlist_free(unsup_feat); + } /* * If the vdev guid sum doesn't match the uberblock, we have an @@ -1938,7 +2316,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, spa->spa_claim_max_txg = spa->spa_first_txg; spa->spa_prev_software_version = ub->ub_software_version; - error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool); + error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); if (error) return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; @@ -1946,6 +2324,89 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); + if (spa_version(spa) >= SPA_VERSION_FEATURES) { + boolean_t missing_feat_read = B_FALSE; + nvlist_t *unsup_feat, *enabled_feat; + + if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, + &spa->spa_feat_for_read_obj) != 0) { + return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); + } + + if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, + &spa->spa_feat_for_write_obj) != 0) { + return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); + } + + if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, + &spa->spa_feat_desc_obj) != 0) { + return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); + } + + enabled_feat = fnvlist_alloc(); + unsup_feat = fnvlist_alloc(); + + if (!feature_is_supported(spa->spa_meta_objset, + spa->spa_feat_for_read_obj, spa->spa_feat_desc_obj, + unsup_feat, enabled_feat)) + missing_feat_read = B_TRUE; + + if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { + if (!feature_is_supported(spa->spa_meta_objset, + spa->spa_feat_for_write_obj, spa->spa_feat_desc_obj, + unsup_feat, enabled_feat)) { + missing_feat_write = B_TRUE; + } + } + + fnvlist_add_nvlist(spa->spa_load_info, + ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); + + if (!nvlist_empty(unsup_feat)) { + fnvlist_add_nvlist(spa->spa_load_info, + ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); + } + + fnvlist_free(enabled_feat); + fnvlist_free(unsup_feat); + + if (!missing_feat_read) { + fnvlist_add_boolean(spa->spa_load_info, + ZPOOL_CONFIG_CAN_RDONLY); + } + + /* + * If the state is SPA_LOAD_TRYIMPORT, our objective is + * twofold: to determine whether the pool is available for + * import in read-write mode and (if it is not) whether the + * pool is available for import in read-only mode. If the pool + * is available for import in read-write mode, it is displayed + * as available in userland; if it is not available for import + * in read-only mode, it is displayed as unavailable in + * userland. If the pool is available for import in read-only + * mode but not read-write mode, it is displayed as unavailable + * in userland with a special note that the pool is actually + * available for open in read-only mode. 
+ * + * As a result, if the state is SPA_LOAD_TRYIMPORT and we are + * missing a feature for write, we must first determine whether + * the pool can be opened read-only before returning to + * userland in order to know whether to display the + * abovementioned note. + */ + if (missing_feat_read || (missing_feat_write && + spa_writeable(spa))) { + return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, + ENOTSUP)); + } + } + + spa->spa_is_initializing = B_TRUE; + error = dsl_pool_open(spa->spa_dsl_pool); + spa->spa_is_initializing = B_FALSE; + if (error != 0) + return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); + if (!mosconfig) { uint64_t hostid; nvlist_t *policy = NULL, *nvconfig; @@ -1974,12 +2435,12 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, hostid != myhostid) { nvlist_free(nvconfig); cmn_err(CE_WARN, "pool '%s' could not be " - "loaded as it was last accessed by " - "another system (host: %s hostid: 0x%lx). " - "See: http://zfsonlinux.org/msg/ZFS-8000-EY", + "loaded as it was last accessed by another " + "system (host: %s hostid: 0x%lx). See: " + "http://zfsonlinux.org/msg/ZFS-8000-EY", spa_name(spa), hostname, (unsigned long)hostid); - return (EBADF); + return (SET_ERROR(EBADF)); } } if (nvlist_lookup_nvlist(spa->spa_config, @@ -2163,12 +2624,12 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, nvlist_free(nvconfig); /* - * Now that we've validate the config, check the state of the + * Now that we've validated the config, check the state of the * root vdev. If it can't be opened, it indicates one or * more toplevel vdevs are faulted. */ if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) - return (ENXIO); + return (SET_ERROR(ENXIO)); if (spa_check_logs(spa)) { *ereport = FM_EREPORT_ZFS_LOG_REPLAY; @@ -2176,6 +2637,17 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, } } + if (missing_feat_write) { + ASSERT(state == SPA_LOAD_TRYIMPORT); + + /* + * At this point, we know that we can open the pool in + * read-only mode but not read-write mode. We now have enough + * information and can return to userland. + */ + return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); + } + /* * We've successfully opened the pool, verify that we're ready * to start pushing transactions. @@ -2255,6 +2727,12 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, vdev_resilver_needed(rvd, NULL, NULL)) spa_async_request(spa, SPA_ASYNC_RESILVER); + /* + * Log the fact that we booted up (so that we can detect if + * we rebooted in the middle of an operation). + */ + spa_history_log_version(spa, "open"); + /* * Delete any inconsistent datasets. */ @@ -2286,10 +2764,18 @@ spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); } +/* + * If spa_load() fails this function will try loading prior txg's. If + * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool + * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this + * function will not rewind the pool and will return the same error as + * spa_load(). 
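+ *
+ * (Sketch of the rewind arithmetic below, added as an annotation: the
+ * newest txg eligible for rollback is spa_last_ubsync_txg minus
+ * TXG_DEFER_SIZE, so a recovery import retries spa_load() with
+ * spa_load_max_txg stepped back until the load either succeeds or
+ * crosses that safe limit.)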
+ */ static int spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, uint64_t max_request, int rewind_flags) { + nvlist_t *loadinfo = NULL; nvlist_t *config = NULL; int load_error, rewind_error; uint64_t safe_rewind_txg; @@ -2318,9 +2804,18 @@ spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, return (load_error); } - /* Price of rolling back is discarding txgs, including log */ - if (state == SPA_LOAD_RECOVER) + if (state == SPA_LOAD_RECOVER) { + /* Price of rolling back is discarding txgs, including log */ spa_set_log_state(spa, SPA_LOG_CLEAR); + } else { + /* + * If we aren't rolling back save the load info from our first + * import attempt so that we can restore it after attempting + * to rewind. + */ + loadinfo = spa->spa_load_info; + spa->spa_load_info = fnvlist_alloc(); + } spa->spa_load_max_txg = spa->spa_last_ubsync_txg; safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; @@ -2344,7 +2839,20 @@ spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, if (config && (rewind_error || state != SPA_LOAD_RECOVER)) spa_config_set(spa, config); - return (state == SPA_LOAD_RECOVER ? rewind_error : load_error); + if (state == SPA_LOAD_RECOVER) { + ASSERT3P(loadinfo, ==, NULL); + return (rewind_error); + } else { + /* Store the rewind info as part of the initial load info */ + fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, + spa->spa_load_info); + + /* Restore the initial load info */ + fnvlist_free(spa->spa_load_info); + spa->spa_load_info = loadinfo; + + return (load_error); + } } /* @@ -2367,6 +2875,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, spa_load_state_t state = SPA_LOAD_OPEN; int error; int locked = B_FALSE; + int firstopen = B_FALSE; *spapp = NULL; @@ -2384,12 +2893,14 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, if ((spa = spa_lookup(pool)) == NULL) { if (locked) mutex_exit(&spa_namespace_lock); - return (ENOENT); + return (SET_ERROR(ENOENT)); } if (spa->spa_state == POOL_STATE_UNINITIALIZED) { zpool_rewind_policy_t policy; + firstopen = B_TRUE; + zpool_get_rewind_policy(nvpolicy ? 
nvpolicy : spa->spa_config, &policy); if (policy.zrp_request & ZPOOL_DO_REWIND) @@ -2417,7 +2928,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, spa_remove(spa); if (locked) mutex_exit(&spa_namespace_lock); - return (ENOENT); + return (SET_ERROR(ENOENT)); } if (error) { @@ -2428,7 +2939,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, */ if (config != NULL && spa->spa_config) { VERIFY(nvlist_dup(spa->spa_config, config, - KM_SLEEP) == 0); + KM_PUSHPAGE) == 0); VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info) == 0); @@ -2464,6 +2975,11 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, mutex_exit(&spa_namespace_lock); } +#ifdef _KERNEL + if (firstopen) + zvol_create_minors(spa->spa_name); +#endif + *spapp = spa; return (0); @@ -2614,8 +3130,50 @@ spa_add_l2cache(spa_t *spa, nvlist_t *config) } } +static void +spa_add_feature_stats(spa_t *spa, nvlist_t *config) +{ + nvlist_t *features; + zap_cursor_t zc; + zap_attribute_t za; + + ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); + VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + if (spa->spa_feat_for_read_obj != 0) { + for (zap_cursor_init(&zc, spa->spa_meta_objset, + spa->spa_feat_for_read_obj); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + ASSERT(za.za_integer_length == sizeof (uint64_t) && + za.za_num_integers == 1); + VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, + za.za_first_integer)); + } + zap_cursor_fini(&zc); + } + + if (spa->spa_feat_for_write_obj != 0) { + for (zap_cursor_init(&zc, spa->spa_meta_objset, + spa->spa_feat_for_write_obj); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + ASSERT(za.za_integer_length == sizeof (uint64_t) && + za.za_num_integers == 1); + VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, + za.za_first_integer)); + } + zap_cursor_fini(&zc); + } + + VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, + features) == 0); + nvlist_free(features); +} + int -spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen) +spa_get_stats(const char *name, nvlist_t **config, + char *altroot, size_t buflen) { int error; spa_t *spa; @@ -2650,6 +3208,7 @@ spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen) spa_add_spares(spa, *config); spa_add_l2cache(spa, *config); + spa_add_feature_stats(spa, *config); } } @@ -2705,14 +3264,14 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, return (0); if (ndev == 0) - return (EINVAL); + return (SET_ERROR(EINVAL)); /* * Make sure the pool is formatted with a version that supports this * device type. 
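+	 *
+	 * (For instance, as an annotation to the check below: callers pass
+	 * SPA_VERSION_SPARES when validating hot spares and
+	 * SPA_VERSION_L2CACHE when validating cache devices, so adding
+	 * either to a pool formatted before those versions fails with
+	 * ENOTSUP.)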
*/ if (spa_version(spa) < version) - return (ENOTSUP); + return (SET_ERROR(ENOTSUP)); /* * Set the pending device list so we correctly handle device in-use @@ -2728,7 +3287,7 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, if (!vd->vdev_ops->vdev_op_leaf) { vdev_free(vd); - error = EINVAL; + error = SET_ERROR(EINVAL); goto out; } @@ -2739,7 +3298,8 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, #ifdef _KERNEL if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { - error = ENOTBLK; + error = SET_ERROR(ENOTBLK); + vdev_free(vd); goto out; } #endif @@ -2803,13 +3363,13 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, &olddevs, &oldndevs) == 0); newdevs = kmem_alloc(sizeof (void *) * - (ndevs + oldndevs), KM_SLEEP); + (ndevs + oldndevs), KM_PUSHPAGE); for (i = 0; i < oldndevs; i++) VERIFY(nvlist_dup(olddevs[i], &newdevs[i], - KM_SLEEP) == 0); + KM_PUSHPAGE) == 0); for (i = 0; i < ndevs; i++) VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], - KM_SLEEP) == 0); + KM_PUSHPAGE) == 0); VERIFY(nvlist_remove(sav->sav_config, config, DATA_TYPE_NVLIST_ARRAY) == 0); @@ -2824,7 +3384,7 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, * Generate a new dev list. */ VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, - KM_SLEEP) == 0); + KM_PUSHPAGE) == 0); VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, devs, ndevs) == 0); } @@ -2849,10 +3409,6 @@ spa_l2cache_drop(spa_t *spa) if (spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL && l2arc_vdev_present(vd)) l2arc_remove_vdev(vd); - if (vd->vdev_isl2cache) - spa_l2cache_remove(vd); - vdev_clear_stats(vd); - (void) vdev_close(vd); } } @@ -2861,7 +3417,7 @@ spa_l2cache_drop(spa_t *spa) */ int spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, - const char *history_str, nvlist_t *zplprops) + nvlist_t *zplprops) { spa_t *spa; char *altroot = NULL; @@ -2873,6 +3429,8 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, nvlist_t **spares, **l2cache; uint_t nspares, nl2cache; uint64_t version, obj; + boolean_t has_features; + nvpair_t *elem; int c; /* @@ -2881,7 +3439,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, mutex_enter(&spa_namespace_lock); if (spa_lookup(pool) != NULL) { mutex_exit(&spa_namespace_lock); - return (EEXIST); + return (SET_ERROR(EEXIST)); } /* @@ -2899,10 +3457,18 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, return (error); } - if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), - &version) != 0) + has_features = B_FALSE; + for (elem = nvlist_next_nvpair(props, NULL); + elem != NULL; elem = nvlist_next_nvpair(props, elem)) { + if (zpool_prop_feature(nvpair_name(elem))) + has_features = B_TRUE; + } + + if (has_features || nvlist_lookup_uint64(props, + zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { version = SPA_VERSION; - ASSERT(version <= SPA_VERSION); + } + ASSERT(SPA_VERSION_IS_SUPPORTED(version)); spa->spa_first_txg = txg; spa->spa_uberblock.ub_txg = txg - 1; @@ -2926,7 +3492,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, ASSERT(error != 0 || spa->spa_root_vdev == rvd); if (error == 0 && !zfs_allocatable_devs(nvroot)) - error = EINVAL; + error = SET_ERROR(EINVAL); if (error == 0 && (error = vdev_create(rvd, txg, B_FALSE)) == 0 && @@ -2954,7 +3520,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, if 
(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, - KM_SLEEP) == 0); + KM_PUSHPAGE) == 0); VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, spares, nspares) == 0); spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); @@ -2969,7 +3535,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) { VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, - NV_UNIQUE_NAME, KM_SLEEP) == 0); + NV_UNIQUE_NAME, KM_PUSHPAGE) == 0); VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); @@ -2978,8 +3544,10 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, spa->spa_l2cache.sav_sync = B_TRUE; } + spa->spa_is_initializing = B_TRUE; spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); spa->spa_meta_objset = dp->dp_meta_objset; + spa->spa_is_initializing = B_FALSE; /* * Create DDTs (dedup tables). @@ -3003,6 +3571,9 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, cmn_err(CE_PANIC, "failed to add pool config"); } + if (spa_version(spa) >= SPA_VERSION_FEATURES) + spa_feature_create_zap_objects(spa, tx); + if (zap_add(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, sizeof (uint64_t), 1, &version, tx) != 0) { @@ -3051,7 +3622,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, if (props != NULL) { spa_configfile_set(spa, props, B_FALSE); - spa_sync_props(spa, props, tx); + spa_sync_props(props, tx); } dmu_tx_commit(tx); @@ -3067,9 +3638,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, spa_config_sync(spa, B_FALSE, B_TRUE); - if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) - (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); - spa_history_log_version(spa, LOG_POOL_CREATE); + spa_history_log_version(spa, "create"); spa->spa_minref = refcount_count(&spa->spa_refcount); @@ -3107,7 +3676,7 @@ spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) /* * Put this pool's top-level vdevs into a root vdev. */ - VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0); VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); @@ -3195,9 +3764,9 @@ spa_import_rootpool(char *devpath, char *devid) } #endif if (config == NULL) { - cmn_err(CE_NOTE, "Can not read the pool label from '%s'", + cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", devpath); - return (EIO); + return (SET_ERROR(EIO)); } VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, @@ -3240,7 +3809,7 @@ spa_import_rootpool(char *devpath, char *devid) if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", (u_longlong_t)guid); - error = ENOENT; + error = SET_ERROR(ENOENT); goto out; } @@ -3252,7 +3821,7 @@ spa_import_rootpool(char *devpath, char *devid) if (avd != bvd) { cmn_err(CE_NOTE, "The boot device is 'degraded'. 
Please " "try booting from '%s'", avd->vdev_path); - error = EINVAL; + error = SET_ERROR(EINVAL); goto out; } @@ -3266,12 +3835,11 @@ spa_import_rootpool(char *devpath, char *devid) "try booting from '%s'", bvd->vdev_parent-> vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); - error = EINVAL; + error = SET_ERROR(EINVAL); goto out; } error = 0; - spa_history_log_version(spa, LOG_POOL_IMPORT); out: spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); vdev_free(rvd); @@ -3288,7 +3856,7 @@ out: * Import a non-root pool into the system. */ int -spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) +spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) { spa_t *spa; char *altroot = NULL; @@ -3307,7 +3875,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) mutex_enter(&spa_namespace_lock); if (spa_lookup(pool) != NULL) { mutex_exit(&spa_namespace_lock); - return (EEXIST); + return (SET_ERROR(EEXIST)); } /* @@ -3333,7 +3901,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) spa_config_sync(spa, B_FALSE, B_TRUE); mutex_exit(&spa_namespace_lock); - spa_history_log_version(spa, LOG_POOL_IMPORT); + spa_history_log_version(spa, "import"); return (0); } @@ -3418,7 +3986,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); else VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, - NV_UNIQUE_NAME, KM_SLEEP) == 0); + NV_UNIQUE_NAME, KM_PUSHPAGE) == 0); VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, spares, nspares) == 0); spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); @@ -3433,7 +4001,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); else VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, - NV_UNIQUE_NAME, KM_SLEEP) == 0); + NV_UNIQUE_NAME, KM_PUSHPAGE) == 0); VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); @@ -3464,7 +4032,11 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); mutex_exit(&spa_namespace_lock); - spa_history_log_version(spa, LOG_POOL_IMPORT); + spa_history_log_version(spa, "import"); + +#ifdef _KERNEL + zvol_create_minors(pool); +#endif return (0); } @@ -3509,6 +4081,8 @@ spa_tryimport(nvlist_t *tryconfig) state) == 0); VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, spa->spa_uberblock.ub_timestamp) == 0); + VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, + spa->spa_load_info) == 0); /* * If the bootfs property exists on this pool then we @@ -3516,7 +4090,7 @@ spa_tryimport(nvlist_t *tryconfig) * pools are bootable. 
*/ if ((!error || error == EEXIST) && spa->spa_bootfs) { - char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); + char *tmpname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE); /* * We have to play games with the name since the @@ -3525,7 +4099,9 @@ spa_tryimport(nvlist_t *tryconfig) if (dsl_dsobj_to_dsname(spa_name(spa), spa->spa_bootfs, tmpname) == 0) { char *cp; - char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); + char *dsname; + + dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE); cp = strchr(tmpname, '/'); if (cp == NULL) { @@ -3578,12 +4154,12 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, *oldconfig = NULL; if (!(spa_mode_global & FWRITE)) - return (EROFS); + return (SET_ERROR(EROFS)); mutex_enter(&spa_namespace_lock); if ((spa = spa_lookup(pool)) == NULL) { mutex_exit(&spa_namespace_lock); - return (ENOENT); + return (SET_ERROR(ENOENT)); } /* @@ -3617,7 +4193,7 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, new_state != POOL_STATE_UNINITIALIZED)) { spa_async_resume(spa); mutex_exit(&spa_namespace_lock); - return (EBUSY); + return (SET_ERROR(EBUSY)); } /* @@ -3630,7 +4206,7 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, spa_has_active_shared_spare(spa)) { spa_async_resume(spa); mutex_exit(&spa_namespace_lock); - return (EXDEV); + return (SET_ERROR(EXDEV)); } /* @@ -3826,12 +4402,12 @@ int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) { uint64_t txg, dtl_max_txg; - ASSERTV(vdev_t *rvd = spa->spa_root_vdev;) vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; vdev_ops_t *pvops; char *oldvdpath, *newvdpath; int newvd_isspare; int error; + ASSERTV(vdev_t *rvd = spa->spa_root_vdev); ASSERT(spa_writeable(spa)); @@ -3848,7 +4424,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) pvd = oldvd->vdev_parent; if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, - VDEV_ALLOC_ADD)) != 0) + VDEV_ALLOC_ATTACH)) != 0) return (spa_vdev_exit(spa, NULL, txg, EINVAL)); if (newrootvd->vdev_children != 1) @@ -3930,7 +4506,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { spa_strfree(oldvd->vdev_path); oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, - KM_SLEEP); + KM_PUSHPAGE); (void) sprintf(oldvd->vdev_path, "%s/%s", newvd->vdev_path, "old"); if (oldvd->vdev_devid != NULL) { @@ -3940,7 +4516,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) } /* mark the device being resilvered */ - newvd->vdev_resilvering = B_TRUE; + newvd->vdev_resilver_txg = txg; /* * If the parent is not a mirror, or if we're replacing, insert the new @@ -4001,7 +4577,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) */ (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); - spa_history_log_internal(LOG_POOL_VDEV_ATTACH, spa, NULL, + spa_history_log_internal(spa, "vdev attach", NULL, "%s vdev=%s %s vdev=%s", replacing && newvd_isspare ? "spare in" : replacing ? "replace" : "attach", newvdpath, @@ -4018,6 +4594,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) /* * Detach a device from a mirror or replacing vdev. + * * If 'replace_done' is specified, only detach if the parent * is a replacing vdev. 
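+ *
+ * (Example, added for clarity: when a "zpool replace" finishes
+ * resilvering, spa_vdev_resilver_done() detaches the obsolete device
+ * with replace_done set, collapsing the temporary replacing vdev back
+ * into a plain leaf; a user-initiated "zpool detach" reaches here
+ * without replace_done and also works on ordinary mirrors.)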
@@ -4018,6 +4594,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
 
 /*
  * Detach a device from a mirror or replacing vdev.
+ *
  * If 'replace_done' is specified, only detach if the parent
  * is a replacing vdev.
  */
@@ -4026,13 +4603,12 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
 {
 	uint64_t txg;
 	int error;
-	ASSERTV(vdev_t *rvd = spa->spa_root_vdev;)
 	vdev_t *vd, *pvd, *cvd, *tvd;
 	boolean_t unspare = B_FALSE;
 	uint64_t unspare_guid = 0;
 	char *vdpath;
 	int c, t;
-
+	ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
 	ASSERT(spa_writeable(spa));
 
 	txg = spa_vdev_enter(spa);
@@ -4170,7 +4746,6 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
 		if (pvd->vdev_ops == &vdev_spare_ops)
 			cvd->vdev_unspare = B_FALSE;
 		vdev_remove_parent(cvd);
-		cvd->vdev_resilvering = B_FALSE;
 	}
 
 
@@ -4219,7 +4794,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
 
 	error = spa_vdev_exit(spa, vd, txg, 0);
 
-	spa_history_log_internal(LOG_POOL_VDEV_DETACH, spa, NULL,
+	spa_history_log_internal(spa, "detach", NULL,
 	    "vdev=%s", vdpath);
 	spa_strfree(vdpath);
 
@@ -4325,8 +4900,8 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
 
-	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
-	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
+	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_PUSHPAGE);
+	glist = kmem_zalloc(children * sizeof (uint64_t), KM_PUSHPAGE);
 
 	/* then, loop over each vdev and validate it */
 	for (c = 0; c < children; c++) {
@@ -4340,7 +4915,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
 				continue;
 			} else {
-				error = EINVAL;
+				error = SET_ERROR(EINVAL);
 				break;
 			}
 		}
@@ -4348,14 +4923,14 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 		/* which disk is going to be split? */
 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
 		    &glist[c]) != 0) {
-			error = EINVAL;
+			error = SET_ERROR(EINVAL);
 			break;
 		}
 
 		/* look it up in the spa */
 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
 		if (vml[c] == NULL) {
-			error = ENODEV;
+			error = SET_ERROR(ENODEV);
 			break;
 		}
 
@@ -4369,12 +4944,12 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 		    vml[c]->vdev_children != 0 ||
 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
-			error = EINVAL;
+			error = SET_ERROR(EINVAL);
 			break;
 		}
 
 		if (vdev_dtl_required(vml[c])) {
-			error = EBUSY;
+			error = SET_ERROR(EBUSY);
 			break;
 		}
 
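
[Editor's note] The validation loop above repeats one lookup pattern per child: fetch the guid named in the split request, then resolve it against the pool. Restated as a self-contained helper (the function name is illustrative, not part of the diff; the B_FALSE argument to spa_lookup_by_guid() means the spare and l2cache vdevs are not searched):

static int
split_resolve_child(spa_t *spa, nvlist_t *child, uint64_t *guidp,
    vdev_t **vdp)
{
	/* which disk is going to be split? */
	if (nvlist_lookup_uint64(child, ZPOOL_CONFIG_GUID, guidp) != 0)
		return (SET_ERROR(EINVAL));

	/* look it up in the spa */
	*vdp = spa_lookup_by_guid(spa, *guidp, B_FALSE);
	if (*vdp == NULL)
		return (SET_ERROR(ENODEV));

	return (0);
}
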
@@ -4406,7 +4981,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 	 * Temporarily record the splitting vdevs in the spa config.  This
 	 * will disappear once the config is regenerated.
 	 */
-	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 	VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
 	    glist, children) == 0);
 	kmem_free(glist, children * sizeof (uint64_t));
@@ -4453,7 +5028,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 	/* if that worked, generate a real config for the new pool */
 	if (newspa->spa_root_vdev != NULL) {
 		VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
-		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
+		    NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 		VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
@@ -4488,9 +5063,8 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 		if (vml[c] != NULL) {
 			vdev_split(vml[c]);
 			if (error == 0)
-				spa_history_log_internal(LOG_POOL_VDEV_DETACH,
-				    spa, tx, "vdev=%s",
-				    vml[c]->vdev_path);
+				spa_history_log_internal(spa, "detach", tx,
+				    "vdev=%s", vml[c]->vdev_path);
 			vdev_free(vml[c]);
 		}
 	}
@@ -4505,8 +5079,8 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 	zio_handle_panic_injection(spa, FTAG, 3);
 
 	/* split is complete; log a history record */
-	spa_history_log_internal(LOG_POOL_SPLIT, newspa, NULL,
-	    "split new pool %s from pool %s", newname, spa_name(spa));
+	spa_history_log_internal(newspa, "split", NULL,
+	    "from pool %s", spa_name(spa));
 
 	kmem_free(vml, children * sizeof (vdev_t *));
 
@@ -4565,12 +5139,12 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
 	int i, j;
 
 	if (count > 1)
-		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
+		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_PUSHPAGE);
 
 	for (i = 0, j = 0; i < count; i++) {
 		if (dev[i] == dev_to_remove)
 			continue;
-		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
+		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_PUSHPAGE) == 0);
 	}
 
 	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
@@ -4606,7 +5180,7 @@ spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
 		if (vd->vdev_stat.vs_alloc != 0)
 			error = spa_offline_log(spa);
 	} else {
-		error = ENOTSUP;
+		error = SET_ERROR(ENOTSUP);
 	}
 
 	if (error)
@@ -4616,7 +5190,7 @@ spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
 	 * The evacuation succeeded.  Remove any remaining MOS metadata
 	 * associated with this vdev, and wait for these changes to sync.
 	 */
-	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
+	ASSERT0(vd->vdev_stat.vs_alloc);
 	txg = spa_vdev_config_enter(spa);
 	vd->vdev_removing = B_TRUE;
 	vdev_dirty(vd, 0, NULL, txg);
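
[Editor's note] The KM_SLEEP to KM_PUSHPAGE conversions clustered in these hunks share one motivation: each allocation can occur while a transaction group is syncing, and on Linux a KM_SLEEP allocation may enter direct reclaim and re-enter the filesystem, deadlocking against the very sync thread performing the allocation. KM_PUSHPAGE forbids that re-entry. The convention, restated as an illustrative helper:

/* illustrative only: allocate an nvlist safely from sync context */
static nvlist_t *
sync_context_alloc_nvlist(void)
{
	nvlist_t *nvl;

	/*
	 * KM_PUSHPAGE rather than KM_SLEEP: this may run under
	 * txg_sync, where direct reclaim must not re-enter the fs.
	 */
	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	return (nvl);
}
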
@@ -4677,11 +5251,9 @@ spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
  * the spa_vdev_config_[enter/exit] functions which allow us to
  * grab and release the spa_config_lock while still holding the namespace
  * lock.  During each step the configuration is synced out.
- */
-
-/*
- * Remove a device from the pool.  Currently, this supports removing only hot
- * spares, slogs, and level 2 ARC devices.
+ *
+ * Currently, this supports removing only hot spares, slogs, and level 2 ARC
+ * devices.
  */
 int
 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
@@ -4715,7 +5287,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
 			spa_load_spares(spa);
 			spa->spa_spares.sav_sync = B_TRUE;
 		} else {
-			error = EBUSY;
+			error = SET_ERROR(EBUSY);
 		}
 	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
 	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
@@ -4775,12 +5347,12 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
 		/*
 		 * Normal vdevs cannot be removed (yet).
 		 */
-		error = ENOTSUP;
+		error = SET_ERROR(ENOTSUP);
 	} else {
 		/*
 		 * There is no vdev of any kind with the specified guid.
 		 */
-		error = ENOENT;
+		error = SET_ERROR(ENOENT);
 	}
 
 	if (!locked)
@@ -4791,7 +5363,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
 
 /*
  * Find any device that's done replacing, or a vdev marked 'unspare' that's
- * current spared, so we can detach it.
+ * currently spared, so we can detach it.
  */
 static vdev_t *
 spa_vdev_resilver_done_hunt(vdev_t *vd)
@@ -4894,6 +5466,8 @@ spa_vdev_resilver_done(spa_t *spa)
 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
 			sguid = ppvd->vdev_child[1]->vdev_guid;
 		}
+		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
+
 		spa_config_exit(spa, SCL_ALL, FTAG);
 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
 			return;
@@ -4968,7 +5542,7 @@ spa_scan_stop(spa_t *spa)
 {
 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
-		return (EBUSY);
+		return (SET_ERROR(EBUSY));
 	return (dsl_scan_cancel(spa->spa_dsl_pool));
 }
 
@@ -4978,7 +5552,7 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
 
 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
-		return (ENOTSUP);
+		return (SET_ERROR(ENOTSUP));
 
 	/*
 	 * If a resilver was requested, but there is no DTL on a
@@ -5088,8 +5662,7 @@ spa_async_thread(spa_t *spa)
 		 * then log an internal history event.
 		 */
 		if (new_space != old_space) {
-			spa_history_log_internal(LOG_POOL_VDEV_ONLINE,
-			    spa, NULL,
+			spa_history_log_internal(spa, "vdev online", NULL,
 			    "pool '%s' size: %llu(+%llu)",
 			    spa_name(spa), new_space, new_space - old_space);
 		}
@@ -5209,6 +5782,31 @@ spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
 	return (0);
 }
 
+/*
+ * Note: this simple function is not inlined to make it easier to dtrace the
+ * amount of time spent syncing frees.
+ */
+static void
+spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
+{
+	zio_t *zio = zio_root(spa, NULL, NULL, 0);
+	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
+	VERIFY(zio_wait(zio) == 0);
+}
+
+/*
+ * Note: this simple function is not inlined to make it easier to dtrace the
+ * amount of time spent syncing deferred frees.
+ */
+static void
+spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
+{
+	zio_t *zio = zio_root(spa, NULL, NULL, 0);
+	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
+	    spa_free_sync_cb, zio, tx), ==, 0);
+	VERIFY0(zio_wait(zio));
+}
+
 static void
 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
 {
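
[Editor's note] The ASSERT0() conversion in spa_vdev_remove_evacuate() and the VERIFY0() in the new spa_sync_deferred_frees() above are shorthand for comparing an expression against zero. A sketch in terms of the three-operand macros; the tree's real definitions route through the same VERIFY3 machinery and compile the ASSERT variants out of non-debug builds:

#define	ASSERT0(x)	ASSERT3U((x), ==, 0)
#define	VERIFY0(x)	VERIFY3U((x), ==, 0)
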
@@ -5224,11 +5822,11 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
 	 * information.  This avoids the dbuf_will_dirty() path and
 	 * saves us a pre-read to get data we don't actually care about.
 	 */
-	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
-	packed = vmem_alloc(bufsize, KM_SLEEP);
+	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
+	packed = vmem_alloc(bufsize, KM_PUSHPAGE);
 
 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
-	    KM_SLEEP) == 0);
+	    KM_PUSHPAGE) == 0);
 	bzero(packed + nvsize, bufsize - nvsize);
 
 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
@@ -5266,11 +5864,11 @@ spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
 		    &sav->sav_object, tx) == 0);
 	}
 
-	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 	if (sav->sav_count == 0) {
 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
 	} else {
-		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
+		list = kmem_alloc(sav->sav_count*sizeof (void *), KM_PUSHPAGE);
 		for (i = 0; i < sav->sav_count; i++)
 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
 			    B_FALSE, VDEV_CONFIG_L2CACHE);
@@ -5300,6 +5898,14 @@ spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
 	config = spa_config_generate(spa, spa->spa_root_vdev,
 	    dmu_tx_get_txg(tx), B_FALSE);
 
+	/*
+	 * If we're upgrading the spa version then make sure that
+	 * the config object gets updated with the correct version.
+	 */
+	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
+		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
+		    spa->spa_uberblock.ub_version);
+
 	spa_config_exit(spa, SCL_STATE, FTAG);
 
 	if (spa->spa_config_syncing)
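
[Editor's note] The hunk below rewrites spa_sync_props() (and adds spa_sync_version()) to the single-argument sync-task convention: the callback receives only its private argument plus the open transaction, and recovers the pool through dmu_tx_pool(tx)->dp_spa. A sketch of how such a callback might be dispatched; the dsl_sync_task() call shape and the trailing blocks-modified count of 3 are assumptions to be checked against dsl_synctask.h in this tree:

static int
example_spa_set_version(spa_t *spa, uint64_t version)
{
	/* NULL check func: validation happened before dispatch */
	return (dsl_sync_task(spa_name(spa), NULL, spa_sync_version,
	    &version, 3));
}
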
@@ -5309,41 +5915,70 @@ spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
 }
 
+static void
+spa_sync_version(void *arg, dmu_tx_t *tx)
+{
+	uint64_t *versionp = arg;
+	uint64_t version = *versionp;
+	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
+
+	/*
+	 * Setting the version is special cased when first creating the pool.
+	 */
+	ASSERT(tx->tx_txg != TXG_INITIAL);
+
+	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
+	ASSERT(version >= spa_version(spa));
+
+	spa->spa_uberblock.ub_version = version;
+	vdev_config_dirty(spa->spa_root_vdev);
+	spa_history_log_internal(spa, "set", tx, "version=%lld", version);
+}
+
 /*
  * Set zpool properties.
  */
 static void
-spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
+spa_sync_props(void *arg, dmu_tx_t *tx)
 {
-	spa_t *spa = arg1;
+	nvlist_t *nvp = arg;
+	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
 	objset_t *mos = spa->spa_meta_objset;
-	nvlist_t *nvp = arg2;
-	nvpair_t *elem;
-	uint64_t intval;
-	char *strval;
-	zpool_prop_t prop;
-	const char *propname;
-	zprop_type_t proptype;
+	nvpair_t *elem = NULL;
 
 	mutex_enter(&spa->spa_props_lock);
 
-	elem = NULL;
 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
-		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
+		uint64_t intval;
+		char *strval, *fname;
+		zpool_prop_t prop;
+		const char *propname;
+		zprop_type_t proptype;
+		zfeature_info_t *feature;
+
+		prop = zpool_name_to_prop(nvpair_name(elem));
+		switch ((int)prop) {
+		case ZPROP_INVAL:
+			/*
+			 * We checked this earlier in spa_prop_validate().
+			 */
+			ASSERT(zpool_prop_feature(nvpair_name(elem)));
+
+			fname = strchr(nvpair_name(elem), '@') + 1;
+			VERIFY3U(0, ==, zfeature_lookup_name(fname, &feature));
+
+			spa_feature_enable(spa, feature, tx);
+			spa_history_log_internal(spa, "set", tx,
+			    "%s=enabled", nvpair_name(elem));
+			break;
+
 		case ZPOOL_PROP_VERSION:
+			VERIFY(nvpair_value_uint64(elem, &intval) == 0);
 			/*
-			 * Only set version for non-zpool-creation cases
-			 * (set/import). spa_create() needs special care
-			 * for version setting.
+			 * The version is synced separately before other
+			 * properties and should be correct by now.
 			 */
-			if (tx->tx_txg != TXG_INITIAL) {
-				VERIFY(nvpair_value_uint64(elem,
-				    &intval) == 0);
-				ASSERT(intval <= SPA_VERSION);
-				ASSERT(intval >= spa_version(spa));
-				spa->spa_uberblock.ub_version = intval;
-				vdev_config_dirty(spa->spa_root_vdev);
-			}
+			ASSERT3U(spa_version(spa), >=, intval);
 			break;
 
 		case ZPOOL_PROP_ALTROOT:
@@ -5361,19 +5996,31 @@ spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
 			 * properties.
 			 */
 			break;
+
+		case ZPOOL_PROP_COMMENT:
+			VERIFY(nvpair_value_string(elem, &strval) == 0);
+			if (spa->spa_comment != NULL)
+				spa_strfree(spa->spa_comment);
+			spa->spa_comment = spa_strdup(strval);
+			/*
+			 * We need to dirty the configuration on all the vdevs
+			 * so that their labels get updated.  It's unnecessary
+			 * to do this for pool creation since the vdev's
+			 * configuration has already been dirtied.
+			 */
+			if (tx->tx_txg != TXG_INITIAL)
+				vdev_config_dirty(spa->spa_root_vdev);
+			spa_history_log_internal(spa, "set", tx,
+			    "%s=%s", nvpair_name(elem), strval);
+			break;
 		default:
 			/*
 			 * Set pool property values in the poolprops mos object.
 			 */
 			if (spa->spa_pool_props_object == 0) {
-				VERIFY((spa->spa_pool_props_object =
-				    zap_create(mos, DMU_OT_POOL_PROPS,
-				    DMU_OT_NONE, 0, tx)) > 0);
-
-				VERIFY(zap_update(mos,
+				spa->spa_pool_props_object =
+				    zap_create_link(mos, DMU_OT_POOL_PROPS,
 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
-				    8, 1, &spa->spa_pool_props_object, tx)
-				    == 0);
+				    tx);
 			}
 
 			/* normalize the property name */
@@ -5386,7 +6033,8 @@ spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
 				VERIFY(zap_update(mos,
 				    spa->spa_pool_props_object, propname,
 				    1, strlen(strval) + 1, strval, tx) == 0);
-
+				spa_history_log_internal(spa, "set", tx,
+				    "%s=%s", nvpair_name(elem), strval);
 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
 				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
 
@@ -5398,6 +6046,8 @@ spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
 				VERIFY(zap_update(mos,
 				    spa->spa_pool_props_object, propname,
 				    8, 1, &intval, tx) == 0);
+				spa_history_log_internal(spa, "set", tx,
+				    "%s=%lld", nvpair_name(elem), intval);
 			} else {
 				ASSERT(0); /* not allowed */
 			}
@@ -5426,13 +6076,6 @@ spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
 			}
 		}
 
-		/* log internal history if this is not a zpool create */
-		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
-		    tx->tx_txg != TXG_INITIAL) {
-			spa_history_log_internal(LOG_POOL_PROPSET,
-			    spa, tx, "%s %lld %s",
-			    nvpair_name(elem), intval, spa_name(spa));
-		}
 	}
 
 	mutex_exit(&spa->spa_props_lock);
@@ -5452,6 +6095,8 @@ spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
 
 	ASSERT(spa->spa_sync_pass == 1);
 
+	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
+
 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
 		dsl_pool_create_origin(dp, tx);
@@ -5472,6 +6117,12 @@ spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
 		/* Keeping the freedir open increases spa_minref */
 		spa->spa_minref += 3;
 	}
+
+	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
+	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
+		spa_feature_create_zap_objects(spa, tx);
+	}
+	rrw_exit(&dp->dp_config_rwlock, FTAG);
 }
 
 /*
@@ -5483,7 +6134,6 @@ spa_sync(spa_t *spa, uint64_t txg)
 {
 	dsl_pool_t *dp = spa->spa_dsl_pool;
 	objset_t *mos = spa->spa_meta_objset;
-	bpobj_t *defer_bpo = &spa->spa_deferred_bpobj;
 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
 	vdev_t *rvd = spa->spa_root_vdev;
 	vdev_t *vd;
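
[Editor's note] Every step of spa_sync_upgrades() above keys off the same boundary test: the last-synced uberblock version is still below a feature's introduction version while the in-core version is at or past it, so each upgrade action runs exactly once, in the txg that crosses the boundary. Schematically (restating the SPA_VERSION_FEATURES hunk):

static void
example_upgrade_once(spa_t *spa, dmu_tx_t *tx)
{
	/* true only in the single txg that crosses the boundary */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);
}
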
@@ -5527,6 +6177,12 @@ spa_sync(spa_t *spa, uint64_t txg)
 
 	tx = dmu_tx_create_assigned(dp, txg);
 
+	spa->spa_sync_starttime = gethrtime();
+	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
+	spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
+	    spa_deadman, spa, TQ_PUSHPAGE, ddi_get_lbolt() +
+	    NSEC_TO_TICK(spa->spa_deadman_synctime));
+
 	/*
 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
 	 * set spa_deflate if we have no raid-z vdevs.
@@ -5560,10 +6216,7 @@ spa_sync(spa_t *spa, uint64_t txg)
 	    !txg_list_empty(&dp->dp_sync_tasks, txg) ||
 	    ((dsl_scan_active(dp->dp_scan) ||
 	    txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
-		zio_t *zio = zio_root(spa, NULL, NULL, 0);
-		VERIFY3U(bpobj_iterate(defer_bpo,
-		    spa_free_sync_cb, zio, tx), ==, 0);
-		VERIFY3U(zio_wait(zio), ==, 0);
+		spa_sync_deferred_frees(spa, tx);
 	}
 
 	/*
@@ -5580,14 +6233,11 @@ spa_sync(spa_t *spa, uint64_t txg)
 		spa_errlog_sync(spa, txg);
 		dsl_pool_sync(dp, txg);
 
-		if (pass <= SYNC_PASS_DEFERRED_FREE) {
-			zio_t *zio = zio_root(spa, NULL, NULL, 0);
-			bplist_iterate(free_bpl, spa_free_sync_cb,
-			    zio, tx);
-			VERIFY(zio_wait(zio) == 0);
+		if (pass < zfs_sync_pass_deferred_free) {
+			spa_sync_frees(spa, free_bpl, tx);
 		} else {
 			bplist_iterate(free_bpl, bpobj_enqueue_cb,
-			    defer_bpo, tx);
+			    &spa->spa_deferred_bpobj, tx);
 		}
 
 		ddt_sync(spa, txg);
@@ -5643,6 +6293,9 @@ spa_sync(spa_t *spa, uint64_t txg)
 			    rvd->vdev_children, txg, B_TRUE);
 	}
 
+	if (error == 0)
+		spa->spa_last_synced_guid = rvd->vdev_guid;
+
 	spa_config_exit(spa, SCL_STATE, FTAG);
 
 	if (error == 0)
@@ -5652,6 +6305,9 @@ spa_sync(spa_t *spa, uint64_t txg)
 	}
 	dmu_tx_commit(tx);
 
+	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
+	spa->spa_deadman_tqid = 0;
+
 	/*
 	 * Clear the dirty config list.
 	 */
@@ -5801,7 +6457,7 @@ spa_upgrade(spa_t *spa, uint64_t version)
 	 * future version would result in an unopenable pool, this shouldn't be
 	 * possible.
 	 */
-	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
+	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
 	ASSERT(version >= spa->spa_uberblock.ub_version);
 
 	spa->spa_uberblock.ub_version = version;
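
[Editor's note] The deadman hunks in spa_sync() above bracket each sync with a watchdog: any stale callback is cancelled, a fresh one is scheduled spa_deadman_synctime nanoseconds out, and the id is cancelled and cleared once the transaction commits. The arm/disarm pairing, restated as two illustrative helpers built from the same calls the diff uses:

static void
example_deadman_arm(spa_t *spa)
{
	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
	    spa_deadman, spa, TQ_PUSHPAGE, ddi_get_lbolt() +
	    NSEC_TO_TICK(spa->spa_deadman_synctime));
}

static void
example_deadman_disarm(spa_t *spa)
{
	taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = 0;
}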