* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
+ * Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
/*
#include "zfs_prop.h"
#include "zfs_comutil.h"
+/*
+ * The interval, in seconds, at which failed configuration cache file writes
+ * should be retried.
+ */
+static int zfs_ccw_retry_interval = 300;
+
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
+ spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
+ DNODE_MAX_SIZE, ZPROP_SRC_NONE);
+ } else {
+ spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
+ DNODE_MIN_SIZE, ZPROP_SRC_NONE);
+ }
+
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
break;
}
- strval = kmem_alloc(
- MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
+ strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
- kmem_free(strval,
- MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
+ kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
/*
* Must be ZPL, and its property settings
* must be supported by GRUB (compression
- * is not gzip, and large blocks are not used).
+ * is not gzip, and large blocks or large
+ * dnodes are not used).
*/
if (dmu_objset_type(os) != DMU_OST_ZFS) {
&propval)) == 0 &&
propval > SPA_OLD_MAXBLOCKSIZE) {
error = SET_ERROR(ENOTSUP);
+ } else if ((error =
+ dsl_prop_get_int_ds(dmu_objset_ds(os),
+ zfs_prop_to_name(ZFS_PROP_DNODESIZE),
+ &propval)) == 0 &&
+ propval != ZFS_DNSIZE_LEGACY) {
+ error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
error = SET_ERROR(EINVAL);
break;
}
- check++;
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
- value = zio_taskq_batch_pct;
+ value = MIN(zio_taskq_batch_pct, 100);
break;
default:
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
- * intensive. Run it at slightly lower priority
- * than the other taskqs.
+ * intensive. Run it at a slightly less important
+ * priority than the other taskqs. Under Linux this
+ * means incrementing the priority value; on platforms
+ * like illumos it should be decremented.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
- pri--;
+ pri++;
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
+
+ /*
+ * This taskq is used to perform zvol-minor-related tasks
+ * asynchronously. This has several advantages, including easy
+ * resolution of various deadlocks (zfsonlinux bug #3681).
+ *
+ * The taskq must be single threaded to ensure tasks are always
+ * processed in the order in which they were dispatched.
+ *
+ * A taskq per pool allows one to keep the pools independent.
+ * This way if one pool is suspended, it will not impact another.
+ *
+ * The preferred location to dispatch a zvol minor task is a sync
+ * task. In this context, there is easy access to the spa_t and minimal
+ * error handling is required because the sync task must succeed.
+ */
+ spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
+ 1, INT_MAX, 0);
}
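Because the taskq is created with exactly one thread, anything dispatched to it executes strictly in dispatch order. A minimal sketch of queueing work on it, assuming hypothetical zvol_task_t and zvol_minors_task_cb names (the real task plumbing lives in zvol.c and is not shown in this hunk):

/*
 * Illustration only: dispatch a zvol minor operation onto the pool's
 * single-threaded z_zvol taskq.  zvol_task_t and zvol_minors_task_cb
 * are assumed stand-ins, not symbols introduced by this change.
 */
static void
spa_dispatch_zvol_task(spa_t *spa, zvol_task_t *task)
{
	taskqid_t id;

	/* TQ_SLEEP: wait for a free entry instead of failing the dispatch */
	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_minors_task_cb,
	    task, TQ_SLEEP);
	ASSERT(id != 0);
}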
/*
spa_evicting_os_wait(spa);
+ if (spa->spa_zvol_taskq) {
+ taskq_destroy(spa->spa_zvol_taskq);
+ spa->spa_zvol_taskq = NULL;
+ }
+
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
}
}
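+/*
+ * Copy the per-vdev ZAP object numbers from the MOS copy of the vdev
+ * tree (mvd) to the in-core vdev tree (vd); per-vdev ZAP info is stored
+ * exclusively in the MOS, so the MOS copy is authoritative.
+ */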
+static void
+spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
+{
+ uint64_t i;
+
+ ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
+
+ vd->vdev_top_zap = mvd->vdev_top_zap;
+ vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
+
+ for (i = 0; i < vd->vdev_children; i++) {
+ spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
+ }
+}
+
/*
* Validate the current config against the MOS config
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_reopen(rvd);
- } else if (mtvd->vdev_islog) {
+ } else {
+ if (mtvd->vdev_islog) {
+ /*
+ * Load the slog device's state from the MOS
+ * config since it's possible that the label
+ * does not contain the most up-to-date
+ * information.
+ */
+ vdev_load_log_state(tvd, mtvd);
+ vdev_reopen(tvd);
+ }
+
/*
- * Load the slog device's state from the MOS config
- * since it's possible that the label does not
- * contain the most up-to-date information.
+ * Per-vdev ZAP info is stored exclusively in the MOS.
*/
- vdev_load_log_state(tvd, mtvd);
- vdev_reopen(tvd);
+ spa_config_valid_zaps(tvd, mtvd);
}
}
+
vdev_free(mrvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
- atomic_add_64(&sle->sle_meta_count, 1);
+ atomic_inc_64(&sle->sle_meta_count);
else
- atomic_add_64(&sle->sle_data_count, 1);
+ atomic_inc_64(&sle->sle_data_count);
}
zio_data_buf_free(zio->io_data, zio->io_size);
size_t size;
void *data;
- if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
+ if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return (0);
/*
* Note: normally this routine will not be called if
return (0);
}
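+/*
+ * dmu_objset_find_dp() callback used by spa_load_verify() to reject any
+ * pool containing a dataset whose name does not fit in a buffer of
+ * ZFS_MAX_DATASET_NAME_LEN bytes.
+ */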
+/* ARGSUSED */
+int
+verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
+{
+ if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
+ return (SET_ERROR(ENAMETOOLONG));
+
+ return (0);
+}
+
static int
spa_load_verify(spa_t *spa)
{
if (policy.zrp_request & ZPOOL_NEVER_REWIND)
return (0);
+ dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
+ error = dmu_objset_find_dp(spa->spa_dsl_pool,
+ spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
+ DS_FIND_CHILDREN);
+ dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
+ if (error != 0)
+ return (error);
+
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
return (error);
}
+#ifdef ZFS_DEBUG
+/*
+ * Count the number of per-vdev ZAPs associated with all of the vdevs in the
+ * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
+ * spa's per-vdev ZAP list.
+ */
+static uint64_t
+vdev_count_verify_zaps(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+ uint64_t total = 0;
+ uint64_t i;
+
+ if (vd->vdev_top_zap != 0) {
+ total++;
+ ASSERT0(zap_lookup_int(spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps, vd->vdev_top_zap));
+ }
+ if (vd->vdev_leaf_zap != 0) {
+ total++;
+ ASSERT0(zap_lookup_int(spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
+ }
+
+ for (i = 0; i < vd->vdev_children; i++) {
+ total += vdev_count_verify_zaps(vd->vdev_child[i]);
+ }
+
+ return (total);
+}
+#endif
+
/*
* Load an existing storage pool, using the pool's builtin spa_config as a
* source of configuration information.
int parse, i;
uint64_t obj;
boolean_t missing_feat_write = B_FALSE;
+ nvlist_t *mos_config;
/*
* If this is an untrusted config, access the pool in read-only mode.
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ /*
+ * Load the per-vdev ZAP map. If we have an older pool, this will not
+ * be present; in this case, defer its creation to a later time to
+ * avoid dirtying the MOS this early / out of sync context. See
+ * spa_sync_config_object.
+ */
+
+ /* The sentinel is only available in the MOS config. */
+ if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+
+ error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
+ &spa->spa_all_vdev_zaps);
+
+ if (error != ENOENT && error != 0) {
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ } else if (error == 0 && !nvlist_exists(mos_config,
+ ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
+ /*
+ * An older version of ZFS overwrote the sentinel value, so
+ * we have orphaned per-vdev ZAPs in the MOS. Defer their
+ * destruction to later; see spa_sync_config_object.
+ */
+ spa->spa_avz_action = AVZ_ACTION_DESTROY;
+ /*
+ * We're assuming that no vdevs have had their ZAPs created
+ * before this. Better be sure of it.
+ */
+ ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
+ }
+ nvlist_free(mos_config);
+
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
mutex_exit(&spa_namespace_lock);
}
-#ifdef _KERNEL
if (firstopen)
- zvol_create_minors(spa->spa_name);
-#endif
+ zvol_create_minors(spa, spa_name(spa), B_TRUE);
*spapp = spa;
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
vdev_get_stats(vd, vs);
+ vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
return (0);
}
-#ifdef _KERNEL
+#if defined(_KERNEL) && !defined(__linux__)
/*
* Get the root pool information from the root disk, then import the root pool
* during the system boot up time.
return (error);
}
-#endif
+#endif /* defined(_KERNEL) && !defined(__linux__) */
/*
* Import a non-root pool into the system.
mutex_exit(&spa_namespace_lock);
spa_history_log_version(spa, "import");
-
-#ifdef _KERNEL
- zvol_create_minors(pool);
-#endif
+ zvol_create_minors(spa, pool, B_TRUE);
return (0);
}
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
+ if (spa->spa_zvol_taskq) {
+ zvol_remove_minors(spa, spa_name(spa), B_TRUE);
+ taskq_wait(spa->spa_zvol_taskq);
+ }
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
vml[c]->vdev_top->vdev_asize) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift) == 0);
+
+ /* transfer per-vdev ZAPs */
+ ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
+ VERIFY0(nvlist_add_uint64(child[c],
+ ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
+
+ ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
+ VERIFY0(nvlist_add_uint64(child[c],
+ ZPOOL_CONFIG_VDEV_TOP_ZAP,
+ vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
spa->spa_config_txg) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL)) == 0);
+ VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
+ newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
+
vdev_free(vml[c]);
}
}
+ spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
mutex_exit(&spa->spa_async_lock);
}
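+/*
+ * Returns B_TRUE if an async task is ready to dispatch.  A pending
+ * SPA_ASYNC_CONFIG_UPDATE is held back while the most recent failed
+ * configuration cache file write is newer than zfs_ccw_retry_interval.
+ */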
+static boolean_t
+spa_async_tasks_pending(spa_t *spa)
+{
+ uint_t non_config_tasks;
+ uint_t config_task;
+ boolean_t config_task_suspended;
+
+ non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
+ config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
+ if (spa->spa_ccw_fail_time == 0) {
+ config_task_suspended = B_FALSE;
+ } else {
+ config_task_suspended =
+ (gethrtime() - spa->spa_ccw_fail_time) <
+ (zfs_ccw_retry_interval * NANOSEC);
+ }
+
+ return (non_config_tasks || (config_task && !config_task_suspended));
+}
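For example, with the default zfs_ccw_retry_interval of 300 seconds, a cache file write that fails at hrtime t keeps SPA_ASYNC_CONFIG_UPDATE suspended until gethrtime() reaches t + 300 * NANOSEC, while any other pending async task still makes this function return B_TRUE and lets spa_async_dispatch() below start the async thread.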
+
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
- if (spa->spa_async_tasks && !spa->spa_async_suspended &&
+ if (spa_async_tasks_pending(spa) &&
+ !spa->spa_async_suspended &&
spa->spa_async_thread == NULL &&
- rootdir != NULL && !vn_is_readonly(rootdir))
+ rootdir != NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
sav->sav_sync = B_FALSE;
}
+/*
+ * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
+ * The all-vdev ZAP must be empty.
+ */
+static void
+spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
+{
+ spa_t *spa = vd->vdev_spa;
+ uint64_t i;
+
+ if (vd->vdev_top_zap != 0) {
+ VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
+ vd->vdev_top_zap, tx));
+ }
+ if (vd->vdev_leaf_zap != 0) {
+ VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
+ vd->vdev_leaf_zap, tx));
+ }
+ for (i = 0; i < vd->vdev_children; i++) {
+ spa_avz_build(vd->vdev_child[i], avz, tx);
+ }
+}
+
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
- if (list_is_empty(&spa->spa_config_dirty_list))
+ /*
+ * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
+ * its config may not be dirty but we still need to build per-vdev ZAPs.
+ * Similarly, if the pool is being assembled (e.g. after a split), we
+ * need to rebuild the AVZ although the config may not be dirty.
+ */
+ if (list_is_empty(&spa->spa_config_dirty_list) &&
+ spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+ ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
+ spa->spa_all_vdev_zaps != 0);
+
+ if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
+ zap_cursor_t zc;
+ zap_attribute_t za;
+
+ /* Make and build the new AVZ */
+ uint64_t new_avz = zap_create(spa->spa_meta_objset,
+ DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
+ spa_avz_build(spa->spa_root_vdev, new_avz, tx);
+
+ /* Diff old AVZ with new one */
+ for (zap_cursor_init(&zc, spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps);
+ zap_cursor_retrieve(&zc, &za) == 0;
+ zap_cursor_advance(&zc)) {
+ uint64_t vdzap = za.za_first_integer;
+ if (zap_lookup_int(spa->spa_meta_objset, new_avz,
+ vdzap) == ENOENT) {
+ /*
+ * ZAP is listed in old AVZ but not in new one;
+ * destroy it
+ */
+ VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
+ tx));
+ }
+ }
+
+ zap_cursor_fini(&zc);
+
+ /* Destroy the old AVZ */
+ VERIFY0(zap_destroy(spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps, tx));
+
+ /* Replace the old AVZ in the dir obj with the new one */
+ VERIFY0(zap_update(spa->spa_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
+ sizeof (new_avz), 1, &new_avz, tx));
+
+ spa->spa_all_vdev_zaps = new_avz;
+ } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
+ zap_cursor_t zc;
+ zap_attribute_t za;
+
+ /* Walk through the AVZ and destroy all listed ZAPs */
+ for (zap_cursor_init(&zc, spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps);
+ zap_cursor_retrieve(&zc, &za) == 0;
+ zap_cursor_advance(&zc)) {
+ uint64_t zap = za.za_first_integer;
+ VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
+ }
+
+ zap_cursor_fini(&zc);
+
+ /* Destroy and unlink the AVZ itself */
+ VERIFY0(zap_destroy(spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps, tx));
+ VERIFY0(zap_remove(spa->spa_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
+ spa->spa_all_vdev_zaps = 0;
+ }
+
+ if (spa->spa_all_vdev_zaps == 0) {
+ spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
+ DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_VDEV_ZAP_MAP, tx);
+ }
+ spa->spa_avz_action = AVZ_ACTION_NONE;
+
+ /* Create ZAPs for vdevs that don't have them. */
+ vdev_construct_zaps(spa->spa_root_vdev, tx);
+
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
spa_config_exit(spa, SCL_STATE, FTAG);
- if (spa->spa_config_syncing)
- nvlist_free(spa->spa_config_syncing);
+ nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
}
- /*
- * If anything has changed in this txg, or if someone is waiting
- * for this txg to sync (eg, spa_vdev_remove()), push the
- * deferred frees from the previous txg. If not, leave them
- * alone so that we don't generate work on an otherwise idle
- * system.
- */
- if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
- !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
- !txg_list_empty(&dp->dp_sync_tasks, txg) ||
- ((dsl_scan_active(dp->dp_scan) ||
- txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
- spa_sync_deferred_frees(spa, tx);
- }
-
/*
* Iterate to convergence.
*/
if (pass < zfs_sync_pass_deferred_free) {
spa_sync_frees(spa, free_bpl, tx);
} else {
+ /*
+ * We can not defer frees in pass 1, because
+ * we sync the deferred frees later in pass 1.
+ */
+ ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_cb,
&spa->spa_deferred_bpobj, tx);
}
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
vdev_sync(vd, txg);
- if (pass == 1)
+ if (pass == 1) {
spa_sync_upgrades(spa, tx);
+ ASSERT3U(txg, >=,
+ spa->spa_uberblock.ub_rootbp.blk_birth);
+ /*
+ * Note: We need to check if the MOS is dirty
+ * because we could have marked the MOS dirty
+ * without updating the uberblock (e.g. if we
+ * have sync tasks but no dirty user data). We
+ * need to check the uberblock's rootbp because
+ * it is updated if we have synced out dirty
+ * data (though in this case the MOS will most
+ * likely also be dirty due to second order
+ * effects, we don't want to rely on that here).
+ */
+ if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
+ !dmu_objset_is_dirty(mos, txg)) {
+ /*
+ * Nothing changed on the first pass,
+ * therefore this TXG is a no-op. Avoid
+ * syncing deferred frees, so that we
+ * can keep this TXG as a no-op.
+ */
+ ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
+ txg));
+ ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
+ ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
+ break;
+ }
+ spa_sync_deferred_frees(spa, tx);
+ }
} while (dmu_objset_is_dirty(mos, txg));
+#ifdef ZFS_DEBUG
+ if (!list_is_empty(&spa->spa_config_dirty_list)) {
+ /*
+ * Make sure that the number of ZAPs for all the vdevs matches
+ * the number of ZAPs in the per-vdev ZAP list. This only gets
+ * called if the config is dirty; otherwise there may be
+ * outstanding AVZ operations that weren't completed in
+ * spa_sync_config_object.
+ */
+ uint64_t all_vdev_zap_entry_count;
+ ASSERT0(zap_count(spa->spa_meta_objset,
+ spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
+ ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
+ all_vdev_zap_entry_count);
+ }
+#endif
+
/*
* Rewrite the vdev configuration (which includes the uberblock)
* to commit the transaction group.
if (svdcount == SPA_DVAS_PER_BP)
break;
}
- error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
- if (error != 0)
- error = vdev_config_sync(svd, svdcount, txg,
- B_TRUE);
+ error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
- rvd->vdev_children, txg, B_FALSE);
- if (error != 0)
- error = vdev_config_sync(rvd->vdev_child,
- rvd->vdev_children, txg, B_TRUE);
+ rvd->vdev_children, txg);
}
if (error == 0)
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
-EXPORT_SYMBOL(spa_import_rootpool);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
module_param(spa_load_verify_data, int, 0644);
MODULE_PARM_DESC(spa_load_verify_data,
"Set to traverse data on pool import");
+
+module_param(zio_taskq_batch_pct, uint, 0444);
+MODULE_PARM_DESC(zio_taskq_batch_pct,
+ "Percentage of CPUs to run an IO worker thread");
+
#endif
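If the zfs_ccw_retry_interval tunable introduced above is also meant to be runtime-adjustable, it could be exported the same way as the parameters in the block above; a sketch under that assumption (no such export appears in this hunk, and it would belong inside the same guarded region):

/* Assumed export, not part of this change */
module_param(zfs_ccw_retry_interval, int, 0644);
MODULE_PARM_DESC(zfs_ccw_retry_interval,
	"Retry interval, in seconds, for failed config cache file writes");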