zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
- if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
+ if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
continue;
switch (za.za_integer_length) {
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
- switch ((int)prop) {
- case ZPROP_INVAL:
+ switch (prop) {
+ case ZPOOL_PROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = SET_ERROR(EINVAL);
break;
prop == ZPOOL_PROP_READONLY)
continue;
- if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
+ if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver;
if (prop == ZPOOL_PROP_VERSION) {
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
+ ASSERTV(uint64_t *newguid = arg);
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
- ASSERTV(uint64_t *newguid = arg);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
char name[32];
- uint_t i, flags = 0;
+ uint_t flags = 0;
boolean_t batch = B_FALSE;
if (mode == ZTI_MODE_NULL) {
break;
}
- for (i = 0; i < count; i++) {
+ for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
if (count > 1) {
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
- uint_t i;
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
- for (i = 0; i < tqs->stqs_count; i++) {
+ for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
- int t, q;
-
- for (t = 0; t < ZIO_TYPES; t++) {
- for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
+ for (int t = 0; t < ZIO_TYPES; t++) {
+ for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
+/*
+ * Disabled until spa_thread() can be adapted for Linux.
+ */
+#undef HAVE_SPA_THREAD
+
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
+ spa_keystore_init(&spa->spa_keystore);
+
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
+ /*
+ * Taskq dedicated to prefetcher threads: this is used to prevent the
+ * pool traverse code from monopolizing the global (and limited)
+ * system_taskq by inappropriately scheduling long running tasks on it.
+ */
+ spa->spa_prefetch_taskq = taskq_create("z_prefetch", boot_ncpus,
+ defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
+
/*
* The taskq to upgrade datasets in this pool. Currently used by
- * feature SPA_FEATURE_USEROBJ_ACCOUNTING.
+	 * features SPA_FEATURE_USEROBJ_ACCOUNTING and SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
static void
spa_deactivate(spa_t *spa)
{
- int t, q;
-
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
spa->spa_zvol_taskq = NULL;
}
+ if (spa->spa_prefetch_taskq) {
+ taskq_destroy(spa->spa_prefetch_taskq);
+ spa->spa_prefetch_taskq = NULL;
+ }
+
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
- for (t = 0; t < ZIO_TYPES; t++) {
- for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
+ for (int t = 0; t < ZIO_TYPES; t++) {
+ for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
-
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
+ spa_keystore_fini(&spa->spa_keystore);
+
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
nvlist_t **child;
uint_t children;
int error;
- int c;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
return (SET_ERROR(EINVAL));
}
- for (c = 0; c < children; c++) {
+ for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
static void
spa_unload(spa_t *spa)
{
- int i, c;
+ int i;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
*/
if (spa->spa_root_vdev != NULL) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
- for (c = 0; c < spa->spa_root_vdev->vdev_children; c++)
+ for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
spa_config_exit(spa, SCL_ALL, FTAG);
}
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
- for (i = 0; i < max_ncpus; i++)
+ for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
static void
spa_load_l2cache(spa_t *spa)
{
- nvlist_t **l2cache;
+ nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
DATA_TYPE_NVLIST_ARRAY) == 0);
- l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
+ if (sav->sav_count > 0)
+ l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
+ KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
static void
spa_check_removed(vdev_t *vd)
{
- int c;
-
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
static void
spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
{
- uint64_t i;
-
ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
vd->vdev_top_zap = mvd->vdev_top_zap;
vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
- for (i = 0; i < vd->vdev_children; i++) {
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
}
}
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv;
- int c, i;
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
KM_SLEEP);
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
vdev_t *mtvd = mrvd->vdev_child[c];
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
- for (i = 0; i < idx; i++)
+ for (int i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
* from the MOS config (mrvd). Check each top-level vdev
* with the corresponding MOS config top-level (mtvd).
*/
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
vdev_t *mtvd = mrvd->vdev_child[c];
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
- int c;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
if (!spa_has_slogs(spa))
return (B_FALSE);
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
- int c;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
- int i;
-
- for (i = 0; i < sav->sav_count; i++)
+ for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
}
mutex_enter(&spa->spa_scrub_lock);
- spa->spa_scrub_inflight--;
+ spa->spa_load_verify_ios--;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
- zio_t *rio;
- size_t size;
-
if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return (0);
/*
if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
return (0);
- rio = arg;
- size = BP_GET_PSIZE(bp);
+ zio_t *rio = arg;
+ size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
- while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
+ while (spa->spa_load_verify_ios >= spa_load_verify_maxinflight)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
- spa->spa_scrub_inflight++;
+ spa->spa_load_verify_ios++;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
if (spa_load_verify_metadata) {
error = traverse_pool(spa, spa->spa_verify_min_txg,
- TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
- spa_load_verify_cb, rio);
+ TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
+ TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
- zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
+ zfs_ereport_post(ereport, spa, NULL, NULL, NULL, 0, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
- uint64_t i;
if (vd->vdev_top_zap != 0) {
total++;
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
- for (i = 0; i < vd->vdev_children; i++) {
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
* Determine whether the activity check is required.
*/
static boolean_t
-spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *config)
+spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
+ nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
- (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
/*
* Disable the MMP activity check - This is used by zdb which
/*
* Allow the activity check to be skipped when importing the pool
- * on the same host which last imported it.
+ * on the same host which last imported it. Since the hostid from
+	 * configuration may be stale, use the one read from the label.
*/
+ if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
+ hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
+
if (hostid == spa_get_hostid())
return (B_FALSE);
import_delay = MAX(import_delay, import_intervals *
MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));
+ zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u "
+ "leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals,
+ vdev_count_leaves(spa));
+
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_expire = gethrtime() + import_delay +
(import_delay * spa_get_random(250) / 1000);
uberblock_t *ub = &spa->spa_uberblock;
uint64_t children, config_cache_txg = spa->spa_config_txg;
int orig_mode = spa->spa_mode;
- int parse, i;
+ int parse;
uint64_t obj;
boolean_t missing_feat_write = B_FALSE;
boolean_t activity_check = B_FALSE;
- nvlist_t *mos_config;
/*
* If this is an untrusted config, access the pool in read-only mode.
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
- for (i = 0; i < max_ncpus; i++) {
+ for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
- activity_check = spa_activity_check_required(spa, ub, config);
+ activity_check = spa_activity_check_required(spa, ub, label, config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid() == 0) {
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
- nvpair_t *nvp;
VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
0);
- for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
- nvp != NULL;
+ for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
+ NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
VERIFY(nvlist_add_string(unsup_feat,
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
- spa_feature_t i;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj) != 0) {
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
- for (i = 0; i < SPA_FEATURES; i++) {
+ for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
*/
/* The sentinel is only available in the MOS config. */
+ nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
dmu_tx_t *tx;
int need_update = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
- int c;
ASSERT(state != SPA_LOAD_TRYIMPORT);
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
- for (c = 0; c < rvd->vdev_children; c++)
+ for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
- spa_history_log_version(spa, "open");
+ spa_history_log_version(spa, "open", NULL);
/*
* Delete any inconsistent datasets.
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
- if (mutex_owner(&spa_namespace_lock) != curthread) {
+ if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa)) == 0);
- if (spa_suspended(spa))
+ if (spa_suspended(spa)) {
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode) == 0);
+ VERIFY(nvlist_add_uint64(*config,
+ ZPOOL_CONFIG_SUSPENDED_REASON,
+ spa->spa_suspended) == 0);
+ }
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
}
}
+/*
+ * Verify encryption parameters for spa creation. If we are encrypting, we must
+ * have the encryption feature flag enabled.
+ */
+static int
+spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
+ boolean_t has_encryption)
+{
+ if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
+ dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
+ !has_encryption)
+ return (SET_ERROR(ENOTSUP));
+
+ return (dmu_objset_create_crypt_check(NULL, dcp));
+}
+
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
- nvlist_t *zplprops)
+ nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
char *altroot = NULL;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
- uint64_t version, obj;
+ uint64_t version, obj, root_dsobj = 0;
boolean_t has_features;
- nvpair_t *elem;
- int c, i;
+ boolean_t has_encryption;
+ spa_feature_t feat;
+ char *feat_name;
char *poolname;
nvlist_t *nvl;
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
- for (elem = nvlist_next_nvpair(props, NULL);
+ has_encryption = B_FALSE;
+ for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
- if (zpool_prop_feature(nvpair_name(elem)))
+ if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
+
+ feat_name = strchr(nvpair_name(elem), '@') + 1;
+ VERIFY0(zfeature_lookup_name(feat_name, &feat));
+ if (feat == SPA_FEATURE_ENCRYPTION)
+ has_encryption = B_TRUE;
+ }
+ }
+
+ /* verify encryption params, if they were provided */
+ if (dcp != NULL) {
+ error = spa_create_check_encryption_params(dcp, has_encryption);
+ if (error != 0) {
+ spa_deactivate(spa);
+ spa_remove(spa);
+ mutex_exit(&spa_namespace_lock);
+ return (error);
+ }
}
if (has_features || nvlist_lookup_uint64(props,
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
- for (i = 0; i < max_ncpus; i++) {
+ for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg,
VDEV_ALLOC_ADD)) == 0) {
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_metaslab_set_size(rvd->vdev_child[c]);
vdev_expand(rvd->vdev_child[c], txg);
}
}
spa->spa_is_initializing = B_TRUE;
- spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
- spa->spa_meta_objset = dp->dp_meta_objset;
+ spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
tx = dmu_tx_create_assigned(dp, txg);
+ /*
+ * Create the pool's history object.
+ */
+ if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
+ spa_history_create_obj(spa, tx);
+
+ spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
+ spa_history_log_version(spa, "create", tx);
+
/*
* Create the pool config object.
*/
cmn_err(CE_PANIC, "failed to add pool config");
}
- if (spa_version(spa) >= SPA_VERSION_FEATURES)
- spa_feature_create_zap_objects(spa, tx);
-
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
- /*
- * Create the pool's history object.
- */
- if (version >= SPA_VERSION_ZPOOL_HISTORY)
- spa_history_create_obj(spa, tx);
-
/*
* Generate some random noise for salted checksums to operate on.
*/
dmu_tx_commit(tx);
+ /*
+ * If the root dataset is encrypted we will need to create key mappings
+ * for the zio layer before we start to write any data to disk and hold
+ * them until after the first txg has been synced. Waiting for the first
+ * transaction to complete also ensures that our bean counters are
+ * appropriately updated.
+ */
+ if (dp->dp_root_dir->dd_crypto_obj != 0) {
+ root_dsobj = dsl_dir_phys(dp->dp_root_dir)->dd_head_dataset_obj;
+ VERIFY0(spa_keystore_create_mapping_impl(spa, root_dsobj,
+ dp->dp_root_dir, FTAG));
+ }
+
spa->spa_sync_on = B_TRUE;
- txg_sync_start(spa->spa_dsl_pool);
+ txg_sync_start(dp);
mmp_thread_start(spa);
+ txg_wait_synced(dp, txg);
- /*
- * We explicitly wait for the first transaction to complete so that our
- * bean counters are appropriately updated.
- */
- txg_wait_synced(spa->spa_dsl_pool, txg);
+ if (dp->dp_root_dir->dd_crypto_obj != 0)
+ VERIFY0(spa_keystore_remove_mapping(spa, root_dsobj, FTAG));
spa_config_sync(spa, B_FALSE, B_TRUE);
- spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
-
- spa_history_log_version(spa, "create");
/*
* Don't count references from objsets that are already closed
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
- spa_history_log_version(spa, "import");
+ spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
}
export_spa:
- spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
+ if (new_state == POOL_STATE_DESTROYED)
+ spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
+ else if (new_state == POOL_STATE_EXPORTED)
+ spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
- int c;
ASSERT(spa_writeable(spa));
/*
* Transfer each new top-level vdev from vd to rvd.
*/
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
/*
* Set the vdev id to the first hole, if one exists.
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
uint64_t txg, dtl_max_txg;
+ ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
- ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
ASSERT(spa_writeable(spa));
{
uint64_t txg;
int error;
+ ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
- int c, t;
- ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
+
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
- for (c = 0; c < pvd->vdev_children; c++) {
+ for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
- for (t = 0; t < TXG_SIZE; t++)
+ for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
- int i;
-
- for (i = 0; i < count; i++) {
+ for (int i = 0; i < count; i++) {
uint64_t guid;
VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
nvlist_t *dev_to_remove)
{
nvlist_t **newdev = NULL;
- int i, j;
if (count > 1)
newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
- for (i = 0, j = 0; i < count; i++) {
+ for (int i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
- for (i = 0; i < count - 1; i++)
+ for (int i = 0; i < count - 1; i++)
nvlist_free(newdev[i]);
if (count > 1)
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
- int c;
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
- int c;
-
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_state_dirty(vd->vdev_top);
}
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
- int c;
-
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
- int c;
-
if (!spa->spa_autoexpand)
return;
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
}
static void
-spa_async_thread(spa_t *spa)
+spa_async_thread(void *arg)
{
- int tasks, i;
+ spa_t *spa = (spa_t *)arg;
+ int tasks;
ASSERT(spa->spa_sync_on);
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
- for (i = 0; i < spa->spa_l2cache.sav_count; i++)
+ for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
- for (i = 0; i < spa->spa_spares.sav_count; i++)
+ for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
- uint64_t i;
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
- for (i = 0; i < vd->vdev_children; i++) {
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
- zap_cursor_t zc;
- zap_attribute_t za;
-
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
+ zap_cursor_t zc;
+ zap_attribute_t za;
+
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zprop_type_t proptype;
spa_feature_t fid;
- prop = zpool_name_to_prop(nvpair_name(elem));
- switch ((int)prop) {
- case ZPROP_INVAL:
+ switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
+ case ZPOOL_PROP_INVAL:
/*
* We checked this earlier in spa_prop_validate().
*/
dsl_pool_t *dp = spa->spa_dsl_pool;
objset_t *mos = spa->spa_meta_objset;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
- metaslab_class_t *mc;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd;
dmu_tx_t *tx;
int error;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
- uint64_t queue_depth_total;
- int c;
VERIFY(spa_writeable(spa));
* The max queue depth will not change in the middle of syncing
* out this txg.
*/
- queue_depth_total = 0;
- for (c = 0; c < rvd->vdev_children; c++) {
+ uint64_t queue_depth_total = 0;
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
mg->mg_max_alloc_queue_depth = max_queue_depth;
queue_depth_total += mg->mg_max_alloc_queue_depth;
}
- mc = spa_normal_class(spa);
+ metaslab_class_t *mc = spa_normal_class(spa);
ASSERT0(refcount_count(&mc->mc_alloc_slots));
mc->mc_alloc_max_slots = queue_depth_total;
mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
int children = rvd->vdev_children;
int c0 = spa_get_random(children);
- for (c = 0; c < children; c++) {
+ for (int c = 0; c < children; c++) {
vd = rvd->vdev_child[(c0 + c) % children];
if (vd->vdev_ms_array == 0 || vd->vdev_islog)
continue;
if (error == 0)
break;
- zio_suspend(spa, NULL);
+ zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
dmu_tx_commit(tx);