/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
- * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
- { ZTI_N(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL }, /* READ */
- { ZTI_BATCH, ZTI_N(5), ZTI_N(16), ZTI_N(5) }, /* WRITE */
- { ZTI_P(4, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
+ { ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
+ { ZTI_BATCH, ZTI_N(5), ZTI_P(12, 8), ZTI_N(5) }, /* WRITE */
+ { ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
};
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
if (strval != NULL)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
- uint64_t size;
- uint64_t alloc;
- uint64_t space;
- uint64_t cap, version;
+ uint64_t size, alloc, cap, version;
zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
- int c;
+ metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
- space = 0;
- for (c = 0; c < rvd->vdev_children; c++) {
- vdev_t *tvd = rvd->vdev_child[c];
- space += tvd->vdev_max_asize - tvd->vdev_asize;
- }
- spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
- src);
-
+ spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
+ metaslab_class_fragmentation(mc), src);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
+ metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == FREAD), src);
}
if (pool != NULL) {
- dsl_dir_t *freedir = pool->dp_free_dir;
-
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
* when opening pools created before this version, it will be NULL.
*/
- if (freedir != NULL) {
+ if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
- freedir->dd_phys->dd_used_bytes, src);
+ dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
+ src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
+
+ if (pool->dp_leak_dir != NULL) {
+ spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
+ dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
+ src);
+ } else {
+ spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
+ NULL, 0, src);
+ }
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
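+	/*
+	 * Report the largest block size the pool will accept: with the
+	 * large_blocks feature enabled this is zfs_max_recordsize (capped
+	 * at SPA_MAXBLOCKSIZE), otherwise the legacy maximum.
+	 */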
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
+ spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+ MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
+ } else {
+ spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
+ SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
+ }
+
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
zap_attribute_t za;
int err;
- err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE);
+ err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
strval = kmem_alloc(
MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
- KM_PUSHPAGE);
+ KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
case 1:
/* string property */
- strval = kmem_alloc(za.za_num_integers, KM_PUSHPAGE);
+ strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
if (!error) {
objset_t *os;
- uint64_t compress;
+ uint64_t propval;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
if (error)
break;
- /* Must be ZPL and not gzip compressed. */
+ /*
+ * Must be ZPL, and its property settings
+ * must be supported by GRUB (compression
+ * is not gzip, and large blocks are not used).
+ */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else if ((error =
dsl_prop_get_int_ds(dmu_objset_ds(os),
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
- &compress)) == 0 &&
- !BOOTFS_COMPRESS_VALID(compress)) {
+ &propval)) == 0 &&
+ !BOOTFS_COMPRESS_VALID(propval)) {
+ error = SET_ERROR(ENOTSUP);
+ } else if ((error =
+ dsl_prop_get_int_ds(dmu_objset_ds(os),
+ zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
+ &propval)) == 0 &&
+ propval > SPA_OLD_MAXBLOCKSIZE) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
- KM_PUSHPAGE);
+ KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
- spa_sync_version, &ver, 6);
+ spa_sync_version, &ver,
+ 6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
- nvp, 6));
+ nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
- spa_change_guid_sync, &guid, 5);
+ spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
spa_config_sync(spa, B_FALSE, B_TRUE);
int ret;
ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
- sizeof (zbookmark_t));
+ sizeof (zbookmark_phys_t));
if (ret < 0)
return (-1);
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
char name[32];
- uint_t i, flags = 0;
+ uint_t i, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
if (mode == ZTI_MODE_NULL) {
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
- value = zio_taskq_batch_pct;
+ value = MIN(zio_taskq_batch_pct, 100);
break;
default:
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
- * intensive. Run it at slightly lower priority
- * than the other taskqs.
+	 * intensive.  Run it at a slightly less important
+	 * priority than the other taskqs.  Under Linux this
+	 * means incrementing the priority value; on platforms
+	 * like illumos it should be decremented instead.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
- pri--;
+ pri++;
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
+ list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
+ offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
+ spa_evicting_os_wait(spa);
+
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
+ list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
- (void) zio_wait(spa->spa_async_zio_root);
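+		/* One "Godfather" root zio exists per CPU; wait for each. */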
+ for (i = 0; i < max_ncpus; i++)
+ (void) zio_wait(spa->spa_async_zio_root[i]);
+ kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
- KM_PUSHPAGE);
+ KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
DATA_TYPE_NVLIST_ARRAY) == 0);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
- KM_PUSHPAGE);
+ KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
if (sav->sav_config != NULL) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
- newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_PUSHPAGE);
+ newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
} else {
nl2cache = 0;
newvdevs = NULL;
VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
DATA_TYPE_NVLIST_ARRAY) == 0);
- l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
+ l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
- packed = kmem_alloc(nvsize, KM_PUSHPAGE | KM_NODEBUG);
+ packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
- kmem_free(packed, nvsize);
+ vmem_free(packed, nvsize);
return (error);
}
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
- KM_PUSHPAGE);
- VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ KM_SLEEP);
+ VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
+ dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
- rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
- NULL, DS_FIND_CHILDREN) != 0);
+ rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
+ zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
+ spa_t *spa = zio->io_spa;
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
atomic_add_64(&sle->sle_data_count, 1);
}
zio_data_buf_free(zio->io_data, zio->io_size);
+
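+	/* Release an in-flight verify I/O slot and wake any throttled issuer. */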
+ mutex_enter(&spa->spa_scrub_lock);
+ spa->spa_scrub_inflight--;
+ cv_broadcast(&spa->spa_scrub_io_cv);
+ mutex_exit(&spa->spa_scrub_lock);
}
+/*
+ * Maximum number of concurrent scrub I/Os to create while verifying
+ * a pool during import.
+ */
+int spa_load_verify_maxinflight = 10000;
+int spa_load_verify_metadata = B_TRUE;
+int spa_load_verify_data = B_TRUE;
+
/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
- const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
+ const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
- if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
- zio_t *rio = arg;
- size_t size = BP_GET_PSIZE(bp);
- void *data = zio_data_buf_alloc(size);
+ zio_t *rio;
+ size_t size;
+ void *data;
- zio_nowait(zio_read(rio, spa, bp, data, size,
- spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
- ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
- ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
- }
+ if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
+ return (0);
+ /*
+ * Note: normally this routine will not be called if
+ * spa_load_verify_metadata is not set. However, it may be useful
+ * to manually set the flag after the traversal has begun.
+ */
+ if (!spa_load_verify_metadata)
+ return (0);
+ if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
+ return (0);
+
+ rio = arg;
+ size = BP_GET_PSIZE(bp);
+ data = zio_data_buf_alloc(size);
+
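+	/*
+	 * Throttle the number of outstanding verify I/Os to
+	 * spa_load_verify_maxinflight; spa_load_verify_done() decrements
+	 * the count and signals the cv.
+	 */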
+ mutex_enter(&spa->spa_scrub_lock);
+ while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
+ cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
+ spa->spa_scrub_inflight++;
+ mutex_exit(&spa->spa_scrub_lock);
+
+ zio_nowait(zio_read(rio, spa, bp, data, size,
+ spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
+ ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
+ ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
spa_load_error_t sle = { 0 };
zpool_rewind_policy_t policy;
boolean_t verify_ok = B_FALSE;
- int error;
+ int error = 0;
zpool_get_rewind_policy(spa->spa_config, &policy);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
- error = traverse_pool(spa, spa->spa_verify_min_txg,
- TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);
+ if (spa_load_verify_metadata) {
+ error = traverse_pool(spa, spa->spa_verify_min_txg,
+ TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
+ spa_load_verify_cb, rio);
+ }
(void) zio_wait(rio);
&glist, &gcount) != 0)
return;
- vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_PUSHPAGE);
+ vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
&nvl) == 0) {
VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
}
nvlist_free(spa->spa_load_info);
mosconfig, &ereport);
}
+ /*
+ * Don't count references from objsets that are already closed
+ * and are making their way through the eviction process.
+ */
+ spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
uberblock_t *ub = &spa->spa_uberblock;
uint64_t children, config_cache_txg = spa->spa_config_txg;
int orig_mode = spa->spa_mode;
- int parse;
+ int parse, i;
uint64_t obj;
boolean_t missing_feat_write = B_FALSE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
- spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
- ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
+ spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
+ KM_SLEEP);
+ for (i = 0; i < max_ncpus; i++) {
+ spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
+ ZIO_FLAG_GODFATHER);
+ }
/*
* Parse the configuration into a vdev tree. We explicitly set the
return (error);
ASSERT(spa->spa_root_vdev == rvd);
+ ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
+ ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
return (SET_ERROR(ENXIO));
- if (spa_check_logs(spa)) {
+ if (spa_writeable(spa) && spa_check_logs(spa)) {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
}
spa->spa_load_max_txg == UINT64_MAX)) {
dmu_tx_t *tx;
int need_update = B_FALSE;
+ dsl_pool_t *dp = spa_get_dsl(spa);
int c;
ASSERT(state != SPA_LOAD_TRYIMPORT);
*/
spa->spa_claiming = B_TRUE;
- tx = dmu_tx_create_assigned(spa_get_dsl(spa),
- spa_first_txg(spa));
- (void) dmu_objset_find(spa_name(spa),
+ tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
+ (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa_unload(spa);
spa_deactivate(spa);
- spa->spa_load_max_txg--;
+ spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
+ if (max_request != UINT64_MAX)
+ spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
*/
if (config != NULL && spa->spa_config) {
VERIFY(nvlist_dup(spa->spa_config, config,
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
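+/*
+ * Read the pool's feature reference counts from the on-disk feature
+ * ZAP objects and add them to the supplied nvlist.
+ */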
static void
-spa_add_feature_stats(spa_t *spa, nvlist_t *config)
+spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
- nvlist_t *features;
zap_cursor_t zc;
zap_attribute_t za;
- ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
- VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
-
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
- VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
+ VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
- VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
+ VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
+}
+
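+/*
+ * Refresh the supplied nvlist with the reference counts cached in the
+ * in-core feature table; features without a cached refcount are skipped.
+ */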
+static void
+spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
+{
+ int i;
+
+ for (i = 0; i < SPA_FEATURES; i++) {
+ zfeature_info_t feature = spa_feature_table[i];
+ uint64_t refcount;
+
+ if (feature_get_refcount(spa, &feature, &refcount) != 0)
+ continue;
+
+ VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
+ }
+}
+
+/*
+ * Store a list of pool features and their reference counts in the
+ * config.
+ *
+ * The first time this is called on a spa, allocate a new nvlist, fetch
+ * the pool features and reference counts from disk, then save the list
+ * in the spa. In subsequent calls on the same spa, use the saved nvlist
+ * and refresh its values from the cached reference counts. This
+ * ensures we don't block here on I/O when the pool is suspended, so
+ * 'zpool clear' can resume the pool.
+ */
+static void
+spa_add_feature_stats(spa_t *spa, nvlist_t *config)
+{
+ nvlist_t *features;
+
+ ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
+
+ mutex_enter(&spa->spa_feat_stats_lock);
+ features = spa->spa_feat_stats;
+
+ if (features != NULL) {
+ spa_feature_stats_from_cache(spa, features);
+ } else {
+ VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
+ spa->spa_feat_stats = features;
+ spa_feature_stats_from_disk(spa, features);
+ }
+
+ VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
+ features));
- VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
- features) == 0);
- nvlist_free(features);
+ mutex_exit(&spa->spa_feat_stats_lock);
}
int
&olddevs, &oldndevs) == 0);
newdevs = kmem_alloc(sizeof (void *) *
- (ndevs + oldndevs), KM_PUSHPAGE);
+ (ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
for (i = 0; i < ndevs; i++)
VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
VERIFY(nvlist_remove(sav->sav_config, config,
DATA_TYPE_NVLIST_ARRAY) == 0);
* Generate a new dev list.
*/
VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
devs, ndevs) == 0);
}
uint64_t version, obj;
boolean_t has_features;
nvpair_t *elem;
- int c;
+ int c, i;
+ char *poolname;
+ nvlist_t *nvl;
+
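+	/*
+	 * An optional "tname" property supplies a temporary name used for
+	 * the in-core spa; the on-disk pool name is still 'pool'.
+	 */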
+ if (nvlist_lookup_string(props, "tname", &poolname) != 0)
+ poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
- if (spa_lookup(pool) != NULL) {
+ if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
+ nvl = fnvlist_alloc();
+ fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
- spa = spa_add(pool, NULL, altroot);
+ spa = spa_add(poolname, nvl, altroot);
+ fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
return (error);
}
+ /*
+ * Temporary pool names should never be written to disk.
+ */
+ if (poolname != pool)
+ spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
+
has_features = B_FALSE;
for (elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
/*
* Create "The Godfather" zio to hold all async IOs
*/
- spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
- ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
+ spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
+ KM_SLEEP);
+ for (i = 0; i < max_ncpus; i++) {
+ spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
+ ZIO_FLAG_GODFATHER);
+ }
/*
* Create the root vdev.
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
- NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_history_log_version(spa, "create");
+ /*
+ * Don't count references from objsets that are already closed
+ * and are making their way through the eviction process.
+ */
+ spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
mutex_exit(&spa_namespace_lock);
/*
* Put this pool's top-level vdevs into a root vdev.
*/
- VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) == 0);
VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
- NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
- NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
- char *tmpname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
+ char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
char *cp;
char *dsname;
- dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
+ dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
+ if (spa->spa_state == POOL_STATE_UNINITIALIZED)
+ goto export_spa;
/*
- * The pool will be in core if it's openable,
- * in which case we can modify its state.
+ * The pool will be in core if it's openable, in which case we can
+ * modify its state. Objsets may be open only because they're dirty,
+ * so we have to force it to sync before checking spa_refcnt.
*/
- if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
- /*
- * Objsets may be open only because they're dirty, so we
- * have to force it to sync before checking spa_refcnt.
- */
+ if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
+ spa_evicting_os_wait(spa);
+ }
- /*
- * A pool cannot be exported or destroyed if there are active
- * references. If we are resetting a pool, allow references by
- * fault injection handlers.
- */
- if (!spa_refcount_zero(spa) ||
- (spa->spa_inject_ref != 0 &&
- new_state != POOL_STATE_UNINITIALIZED)) {
- spa_async_resume(spa);
- mutex_exit(&spa_namespace_lock);
- return (SET_ERROR(EBUSY));
- }
+ /*
+ * A pool cannot be exported or destroyed if there are active
+ * references. If we are resetting a pool, allow references by
+ * fault injection handlers.
+ */
+ if (!spa_refcount_zero(spa) ||
+ (spa->spa_inject_ref != 0 &&
+ new_state != POOL_STATE_UNINITIALIZED)) {
+ spa_async_resume(spa);
+ mutex_exit(&spa_namespace_lock);
+ return (SET_ERROR(EBUSY));
+ }
+ if (spa->spa_sync_on) {
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools stealing the active spare
}
}
+export_spa:
spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_DESTROY);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
- KM_PUSHPAGE);
+ KM_SLEEP);
(void) sprintf(oldvd->vdev_path, "%s/%s",
newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
- vml = kmem_zalloc(children * sizeof (vdev_t *), KM_PUSHPAGE);
- glist = kmem_zalloc(children * sizeof (uint64_t), KM_PUSHPAGE);
+ vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
+ glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
- VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
glist, children) == 0);
kmem_free(glist, children * sizeof (uint64_t));
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
- NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
int i, j;
if (count > 1)
- newdev = kmem_alloc((count - 1) * sizeof (void *), KM_PUSHPAGE);
+ newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
for (i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
- VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_PUSHPAGE) == 0);
+ VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
}
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
- packed = vmem_alloc(bufsize, KM_PUSHPAGE);
+ packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
- KM_PUSHPAGE) == 0);
+ KM_SLEEP) == 0);
bzero(packed + nvsize, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
&sav->sav_object, tx) == 0);
}
- VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
+ VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (sav->sav_count == 0) {
VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
} else {
- list = kmem_alloc(sav->sav_count*sizeof (void *), KM_PUSHPAGE);
+ list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
+
+	/*
+	 * The LZ4_COMPRESS feature's behaviour was changed to
+	 * activate_on_enable when the ability to use lz4 compression for
+	 * metadata was added.  Old pools that have this feature enabled
+	 * must be upgraded to have it active.
+	 */
+ if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
+ boolean_t lz4_en = spa_feature_is_enabled(spa,
+ SPA_FEATURE_LZ4_COMPRESS);
+ boolean_t lz4_ac = spa_feature_is_active(spa,
+ SPA_FEATURE_LZ4_COMPRESS);
+
+ if (lz4_en && !lz4_ac)
+ spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
+ }
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
- spa_deadman, spa, TQ_PUSHPAGE, ddi_get_lbolt() +
+ spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
}
}
- /*
- * If anything has changed in this txg, or if someone is waiting
- * for this txg to sync (eg, spa_vdev_remove()), push the
- * deferred frees from the previous txg. If not, leave them
- * alone so that we don't generate work on an otherwise idle
- * system.
- */
- if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
- !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
- !txg_list_empty(&dp->dp_sync_tasks, txg) ||
- ((dsl_scan_active(dp->dp_scan) ||
- txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
- spa_sync_deferred_frees(spa, tx);
- }
-
/*
* Iterate to convergence.
*/
if (pass < zfs_sync_pass_deferred_free) {
spa_sync_frees(spa, free_bpl, tx);
} else {
+ /*
+			 * We cannot defer frees in pass 1, because
+ * we sync the deferred frees later in pass 1.
+ */
+ ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_cb,
&spa->spa_deferred_bpobj, tx);
}
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
vdev_sync(vd, txg);
- if (pass == 1)
+ if (pass == 1) {
spa_sync_upgrades(spa, tx);
+ ASSERT3U(txg, >=,
+ spa->spa_uberblock.ub_rootbp.blk_birth);
+ /*
+ * Note: We need to check if the MOS is dirty
+ * because we could have marked the MOS dirty
+ * without updating the uberblock (e.g. if we
+ * have sync tasks but no dirty user data). We
+ * need to check the uberblock's rootbp because
+ * it is updated if we have synced out dirty
+ * data (though in this case the MOS will most
+			 * likely also be dirty due to second-order
+			 * effects; we don't want to rely on that here).
+ */
+ if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
+ !dmu_objset_is_dirty(mos, txg)) {
+ /*
+ * Nothing changed on the first pass,
+ * therefore this TXG is a no-op. Avoid
+ * syncing deferred frees, so that we
+ * can keep this TXG as a no-op.
+ */
+ ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
+ txg));
+ ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
+ ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
+ break;
+ }
+ spa_sync_deferred_frees(spa, tx);
+ }
} while (dmu_objset_is_dirty(mos, txg));
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
#endif
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(spa_load_verify_maxinflight, int, 0644);
+MODULE_PARM_DESC(spa_load_verify_maxinflight,
+ "Max concurrent traversal I/Os while verifying pool during import -X");
+
+module_param(spa_load_verify_metadata, int, 0644);
+MODULE_PARM_DESC(spa_load_verify_metadata,
+ "Set to traverse metadata on pool import");
+
+module_param(spa_load_verify_data, int, 0644);
+MODULE_PARM_DESC(spa_load_verify_data,
+ "Set to traverse data on pool import");
+
+module_param(zio_taskq_batch_pct, uint, 0444);
+MODULE_PARM_DESC(zio_taskq_batch_pct,
+ "Percentage of CPUs to run an IO worker thread");
+
+#endif