*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
+ * Copyright 2013 Saso Kiselkov. All rights reserved.
+ * Copyright (c) 2017 Datto Inc.
+ * Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
+#include <sys/vdev_file.h>
+#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
+#include <sys/kstat.h>
#include "zfs_prop.h"
+#include <sys/zfeature.h>
+#include "qat.h"
/*
* SPA locking
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*
- * spa_rename() is also implemented within this file since is requires
+ * spa_rename() is also implemented within this file since it requires
* manipulation of the namespace.
*/
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
-static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
int spa_mode_global;
#ifdef ZFS_DEBUG
-/* Everything except dprintf is on by default in debug builds */
-int zfs_flags = ~ZFS_DEBUG_DPRINTF;
+/*
+ * Everything except dprintf, set_error, spa, and indirect_remap is on
+ * by default in debug builds.
+ */
+int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
+ ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
+ * This should only be used as a last resort, as it typically results
+ * in leaked space, or worse.
+ */
+int zfs_recover = B_FALSE;
+
+/*
+ * If destroy encounters an EIO while reading metadata (e.g. indirect
+ * blocks), space referenced by the missing metadata can not be freed.
+ * Normally this causes the background destroy to become "stalled", as
+ * it is unable to make forward progress. While in this stalled state,
+ * all remaining space to free from the error-encountering filesystem is
+ * "temporarily leaked". Set this flag to cause it to ignore the EIO,
+ * permanently leak the space from indirect blocks that can not be read,
+ * and continue to free everything else that it can.
+ *
+ * The default, "stalling" behavior is useful if the storage partially
+ * fails (i.e. some but not all i/os fail), and then later recovers. In
+ * this case, we will be able to continue pool operations while it is
+ * partially failed, and when it recovers, we can continue to free the
+ * space, with no leaks. However, note that this case is actually
+ * fairly rare.
+ *
+ * Typically pools either (a) fail completely (but perhaps temporarily,
+ * e.g. a top-level vdev going offline), or (b) have localized,
+ * permanent errors (e.g. disk returns the wrong data due to bit flip or
+ * firmware bug). In case (a), this setting does not matter because the
+ * pool will be suspended and the sync thread will not be able to make
+ * forward progress regardless. In case (b), because the error is
+ * permanent, the best we can do is leak the minimum amount of space,
+ * which is what setting this flag will do. Therefore, it is reasonable
+ * for this flag to normally be set, but we chose the more conservative
+ * approach of not setting it, so that there is no possibility of
+ * leaking space in the "partial temporary" failure case.
+ */
+int zfs_free_leak_on_eio = B_FALSE;
+
+/*
+ * Expiration time in milliseconds. This value has two meanings. First it is
+ * used to determine when the spa_deadman() logic should fire. By default the
+ * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
+ * Secondly, the value determines if an I/O is considered "hung". Any I/O that
+ * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
+ * in one of three behaviors controlled by zfs_deadman_failmode.
+ */
+unsigned long zfs_deadman_synctime_ms = 600000ULL;
+
+/*
+ * This value controls the maximum amount of time zio_wait() will block for an
+ * outstanding IO. By default this is 300 seconds at which point the "hung"
+ * behavior will be applied as described for zfs_deadman_synctime_ms.
+ */
+unsigned long zfs_deadman_ziotime_ms = 300000ULL;
+
+/*
+ * Check time in milliseconds. This defines the frequency at which we check
+ * for hung I/O.
+ */
+unsigned long zfs_deadman_checktime_ms = 60000ULL;
+
+/*
+ * By default the deadman is enabled.
+ */
+int zfs_deadman_enabled = 1;
+
+/*
+ * Controls the behavior of the deadman when it detects a "hung" I/O.
+ * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
+ *
+ * wait - Wait for the "hung" I/O (default)
+ * continue - Attempt to recover from a "hung" I/O
+ * panic - Panic the system
+ */
+char *zfs_deadman_failmode = "wait";
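+/*
+ * On Linux this is typically exposed as a writable module parameter, e.g.:
+ *   echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
+ * (see the module_param_call() hookup at the bottom of this file).
+ */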
+
+/*
+ * The worst case is single-sector max-parity RAID-Z blocks, in which
+ * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
+ * times the size; so just assume that. Add to this the fact that
+ * we can have up to 3 DVAs per bp, and one more factor of 2 because
+ * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
+ * the worst case is:
+ * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
+ */
+int spa_asize_inflation = 24;
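+/*
+ * For example, with the default of 24 a 4K logical write is estimated to
+ * need up to 4K * 24 = 96K on disk; see spa_get_worst_case_asize(), which
+ * also floors lsize at 1 << spa_max_ashift.
+ */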
+
+/*
+ * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
+ * the pool to be consumed. This ensures that we don't run the pool
+ * completely out of space, due to unaccounted changes (e.g. to the MOS).
+ * It also limits the worst-case time to allocate space. If we have
+ * less than this amount of free space, most ZPL operations (e.g. write,
+ * create) will return ENOSPC.
+ *
+ * Certain operations (e.g. file removal, most administrative actions) can
+ * use half the slop space. They will only return ENOSPC if less than half
+ * the slop space is free. Typically, once the pool has less than the slop
+ * space free, the user will use these operations to free up space in the pool.
+ * These are the operations that call dsl_pool_adjustedsize() with the netfree
+ * argument set to TRUE.
+ *
+ * Operations that are almost guaranteed to free up space in the absence of
+ * a pool checkpoint can use up to three quarters of the slop space
+ * (e.g. zfs destroy).
+ *
+ * A very restricted set of operations are always permitted, regardless of
+ * the amount of free space. These are the operations that call
+ * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
+ * increase in the amount of space used, it is possible to run the pool
+ * completely out of space, causing it to be permanently read-only.
+ *
+ * Note that on very small pools, the slop space will be larger than
+ * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
+ * but we never allow it to be more than half the pool size.
+ *
+ * See also the comments in zfs_space_check_t.
+ */
+int spa_slop_shift = 5;
+uint64_t spa_min_slop = 128 * 1024 * 1024;
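+/*
+ * Worked examples of the calculation in spa_get_slop_space(): a 10TB pool
+ * reserves 10TB >> 5 = 320GB of slop, while a 1GB pool reserves
+ * MAX(1GB >> 5, MIN(1GB >> 1, 128MB)) = 128MB, i.e. the spa_min_slop floor.
+ */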
+int spa_allocators = 4;
+
+
+/*PRINTFLIKE2*/
+void
+spa_load_failed(spa_t *spa, const char *fmt, ...)
+{
+ va_list adx;
+ char buf[256];
+
+ va_start(adx, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+ va_end(adx);
+
+ zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
+ spa->spa_trust_config ? "trusted" : "untrusted", buf);
+}
+
+/*PRINTFLIKE2*/
+void
+spa_load_note(spa_t *spa, const char *fmt, ...)
+{
+ va_list adx;
+ char buf[256];
+
+ va_start(adx, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+ va_end(adx);
+
+ zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
+ spa->spa_trust_config ? "trusted" : "untrusted", buf);
+}
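+
+/*
+ * Illustrative use (the message text here is hypothetical):
+ *	spa_load_failed(spa, "unable to open vdev tree [error=%d]", error);
+ * which logs "spa_load(<pool>, config <trusted|untrusted>): FAILED: ..."
+ * to the zfs_dbgmsg buffer.
+ */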
+
+/*
+ * By default dedup and user data indirects land in the special class
*/
-int zfs_recover = 0;
+int zfs_ddt_data_is_special = B_TRUE;
+int zfs_user_indirect_is_special = B_TRUE;
+/*
+ * The percentage of special class final space reserved for metadata only.
+ * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
+ * let metadata into the class.
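+ * For example, with the default of 25, spa_preferred_class() stops placing
+ * small file blocks in the special class once it is more than 75% allocated.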
+ */
+int zfs_special_class_metadata_reserve_pct = 25;
/*
* ==========================================================================
static void
spa_config_lock_init(spa_t *spa)
{
- int i;
-
- for (i = 0; i < SCL_LOCKS; i++) {
+ for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
- refcount_create(&scl->scl_count);
+ refcount_create_untracked(&scl->scl_count);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
}
static void
spa_config_lock_destroy(spa_t *spa)
{
- int i;
-
- for (i = 0; i < SCL_LOCKS; i++) {
+ for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
- int i;
-
- for (i = 0; i < SCL_LOCKS; i++) {
+ for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
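+				/*
+				 * Back out only the locks acquired so far
+				 * (bit positions below i).
+				 */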
- spa_config_exit(spa, locks ^ (1 << i), tag);
+ spa_config_exit(spa, locks & ((1 << i) - 1),
+ tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (!refcount_is_zero(&scl->scl_count)) {
mutex_exit(&scl->scl_lock);
- spa_config_exit(spa, locks ^ (1 << i), tag);
+ spa_config_exit(spa, locks & ((1 << i) - 1),
+ tag);
return (0);
}
scl->scl_writer = curthread;
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
int wlocks_held = 0;
- int i;
- for (i = 0; i < SCL_LOCKS; i++) {
+ ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
+
+ for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
(void) refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
- ASSERT(wlocks_held <= locks);
+ ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
- int i;
-
- for (i = SCL_LOCKS - 1; i >= 0; i--) {
+ for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
- int i, locks_held = 0;
+ int locks_held = 0;
- for (i = 0; i < SCL_LOCKS; i++) {
+ for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
- char c = 0;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
+
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
- cp = strpbrk(name, "/@");
- if (cp) {
- c = *cp;
+ cp = strpbrk(search.spa_name, "/@#");
+ if (cp != NULL)
*cp = '\0';
- }
- (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
spa = avl_find(&spa_namespace_avl, &search, &where);
- if (cp)
- *cp = c;
-
return (spa);
}
+/*
+ * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
+ * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
+ * looking for potentially hung I/Os.
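+ * It reschedules itself every zfs_deadman_checktime_ms while the pool is
+ * not suspended.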
+ */
+void
+spa_deadman(void *arg)
+{
+ spa_t *spa = arg;
+
+ /* Disable the deadman if the pool is suspended. */
+ if (spa_suspended(spa))
+ return;
+
+ zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
+ (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
+ ++spa->spa_deadman_calls);
+ if (zfs_deadman_enabled)
+ vdev_deadman(spa->spa_root_vdev, FTAG);
+
+ spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
+ spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
+ MSEC_TO_TICK(zfs_deadman_checktime_ms));
+}
+
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
{
spa_t *spa;
spa_config_dirent_t *dp;
- int t;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
- spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP | KM_NODEBUG);
+ spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
- for (t = 0; t < TXG_SIZE; t++)
+ for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
+ spa->spa_trust_config = B_TRUE;
+
+ spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
+ spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
+ spa_set_deadman_failmode(spa, zfs_deadman_failmode);
refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
+ spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
- if (altroot) {
+ if (altroot)
spa->spa_root = spa_strdup(altroot);
- spa_active_count++;
+
+ spa->spa_alloc_count = spa_allocators;
+ spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
+ sizeof (kmutex_t), KM_SLEEP);
+ spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
+ sizeof (avl_tree_t), KM_SLEEP);
+ for (int i = 0; i < spa->spa_alloc_count; i++) {
+ mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
+ avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
+ sizeof (zio_t), offsetof(zio_t, io_alloc_node));
}
/*
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
- if (config != NULL)
+ if (config != NULL) {
+ nvlist_t *features;
+
+ if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
+ &features) == 0) {
+ VERIFY(nvlist_dup(features, &spa->spa_label_features,
+ 0) == 0);
+ }
+
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
+ }
+
+ if (spa->spa_label_features == NULL) {
+ VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
+ KM_SLEEP) == 0);
+ }
+
+ spa->spa_min_ashift = INT_MAX;
+ spa->spa_max_ashift = 0;
+
+ /* Reset cached value */
+ spa->spa_dedup_dspace = ~0ULL;
+
+ /*
+ * As a pool is being created, treat all features as disabled by
+ * setting SPA_FEATURE_DISABLED for all entries in the feature
+ * refcount cache.
+ */
+ for (int i = 0; i < SPA_FEATURES; i++) {
+ spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
+ }
return (spa);
}
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
- int t;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
+ ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
- if (spa->spa_root) {
+ if (spa->spa_root)
spa_strfree(spa->spa_root);
- spa_active_count--;
- }
while ((dp = list_head(&spa->spa_config_list)) != NULL) {
list_remove(&spa->spa_config_list, dp);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
+ for (int i = 0; i < spa->spa_alloc_count; i++) {
+ avl_destroy(&spa->spa_alloc_trees[i]);
+ mutex_destroy(&spa->spa_alloc_locks[i]);
+ }
+ kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
+ sizeof (kmutex_t));
+ kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
+ sizeof (avl_tree_t));
+
list_destroy(&spa->spa_config_list);
+ nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
+ nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
refcount_destroy(&spa->spa_refcount);
+ spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
- for (t = 0; t < TXG_SIZE; t++)
+ for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
+ zio_checksum_templates_free(spa);
+
cv_destroy(&spa->spa_async_cv);
+ cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
+ mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
+ mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
+ mutex_destroy(&spa->spa_feat_stats_lock);
kmem_free(spa, sizeof (spa_t));
}
(void) refcount_remove(&spa->spa_refcount, tag);
}
+/*
+ * Remove a reference to the given spa_t held by a dsl dir that is
+ * being asynchronously released. Async releases occur from a taskq
+ * performing eviction of dsl datasets and dirs. The namespace lock
+ * isn't held and the hold by the object being evicted may contribute to
+ * spa_minref (e.g. dataset or directory released during pool export),
+ * so the asserts in spa_close() do not apply.
+ */
+void
+spa_async_close(spa_t *spa, void *tag)
+{
+ (void) refcount_remove(&spa->spa_refcount, tag);
+}
+
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
int aux_count;
} spa_aux_t;
-static int
+static inline int
spa_aux_compare(const void *a, const void *b)
{
- const spa_aux_t *sa = a;
- const spa_aux_t *sb = b;
+ const spa_aux_t *sa = (const spa_aux_t *)a;
+ const spa_aux_t *sb = (const spa_aux_t *)b;
- if (sa->aux_guid < sb->aux_guid)
- return (-1);
- else if (sa->aux_guid > sb->aux_guid)
- return (1);
- else
- return (0);
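+	/* AVL_CMP() maps the comparison result onto {-1, 0, 1} */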
+ return (AVL_CMP(sa->aux_guid, sb->aux_guid));
}
void
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
int config_changed = B_FALSE;
- ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
+ ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
+ ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
- ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
+ ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_ALL, spa);
* If the config changed, update the config cache.
*/
if (config_changed)
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
/*
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
+ vdev_t *vdev_top;
+
+ if (vd == NULL || vd == spa->spa_root_vdev) {
+ vdev_top = spa->spa_root_vdev;
+ } else {
+ vdev_top = vd->vdev_top;
+ }
if (vd != NULL || error == 0)
- vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
- 0, 0, B_FALSE);
+ vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE);
if (vd != NULL) {
- vdev_state_dirty(vd->vdev_top);
+ if (vd != spa->spa_root_vdev)
+ vdev_state_dirty(vdev_top);
+
config_changed = B_TRUE;
spa->spa_config_generation++;
}
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
mutex_exit(&spa_namespace_lock);
}
* ==========================================================================
*/
+void
+spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
+{
+ if (!nvlist_exists(spa->spa_label_features, feature)) {
+ fnvlist_add_boolean(spa->spa_label_features, feature);
+ /*
+ * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
+ * dirty the vdev config because lock SCL_CONFIG is not held.
+ * Thankfully, in this case we don't need to dirty the config
+ * because it will be written out anyway when we finish
+ * creating the pool.
+ */
+ if (tx->tx_txg != TXG_INITIAL)
+ vdev_config_dirty(spa->spa_root_vdev);
+ }
+}
+
+void
+spa_deactivate_mos_feature(spa_t *spa, const char *feature)
+{
+ if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
+ vdev_config_dirty(spa->spa_root_vdev);
+}
+
/*
* Rename a spa_t.
*/
/*
* Sync the updated config cache.
*/
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_close(spa, FTAG);
ASSERT(range != 0);
+ if (range == 1)
+ return (0);
+
(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
return (r % range);
}
void
-sprintf_blkptr(char *buf, const blkptr_t *bp)
+snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
- char *type = NULL;
+ char type[256];
char *checksum = NULL;
char *compress = NULL;
if (bp != NULL) {
- type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
- checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
+ if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
+ dmu_object_byteswap_t bswap =
+ DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
+ (void) snprintf(type, sizeof (type), "bswap %s %s",
+ DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
+ "metadata" : "data",
+ dmu_ot_byteswap[bswap].ob_name);
+ } else {
+ (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
+ sizeof (type));
+ }
+ if (!BP_IS_EMBEDDED(bp)) {
+ checksum =
+ zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
+ }
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
- SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
+ SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
+ compress);
}
void
/*
* This is a stripped-down version of strtoull, suitable only for converting
- * lowercase hexidecimal numbers that don't overflow.
+ * lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
-strtonum(const char *str, char **nptr)
+zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
return (val);
}
+void
+spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
+{
+ /*
+ * We bump the feature refcount for each special vdev added to the pool
+ */
+ ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
+ spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
+}
+
/*
* ==========================================================================
* Accessor functions
return (spa->spa_dsl_pool);
}
+boolean_t
+spa_is_initializing(spa_t *spa)
+{
+ return (spa->spa_is_initializing);
+}
+
+boolean_t
+spa_indirect_vdevs_loaded(spa_t *spa)
+{
+ return (spa->spa_indirect_vdevs_loaded);
+}
+
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
uint64_t
spa_guid(spa_t *spa)
{
+ dsl_pool_t *dp = spa_get_dsl(spa);
+ uint64_t guid;
+
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
- * vdev. We stash the original pool guid in 'spa_load_guid' to handle
+ * vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
- if (spa->spa_root_vdev != NULL)
+ if (spa->spa_root_vdev == NULL)
+ return (spa->spa_config_guid);
+
+ guid = spa->spa_last_synced_guid != 0 ?
+ spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
+
+ /*
+ * Return the most recently synced out guid unless we're
+ * in syncing context.
+ */
+ if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
- return (spa->spa_load_guid);
+ return (guid);
+}
+
+uint64_t
+spa_load_guid(spa_t *spa)
+{
+ /*
+ * This is a GUID that exists solely as a reference for the
+ * purposes of the arc. It is generated at load time, and
+ * is never written to persistent storage.
+ */
+ return (spa->spa_load_guid);
}
uint64_t
return (spa->spa_syncing_txg);
}
+/*
+ * Return the last txg where data can be dirtied. The final txgs
+ * will be used to just clear out any deferred frees that remain.
+ */
+uint64_t
+spa_final_dirty_txg(spa_t *spa)
+{
+ return (spa->spa_final_txg - TXG_DEFER_SIZE);
+}
+
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
-/* ARGSUSED */
+/*
+ * Return the inflated asize for a logical write in bytes. This is used by the
+ * DMU to calculate the space a logical write will require on disk.
+ * If lsize is smaller than the largest physical block size allocatable on this
+ * pool we use its value instead, since the write will end up using the whole
+ * block anyway.
+ */
uint64_t
-spa_get_asize(spa_t *spa, uint64_t lsize)
+spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
- /*
- * The worst case is single-sector max-parity RAID-Z blocks, in which
- * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
- * times the size; so just assume that. Add to this the fact that
- * we can have up to 3 DVAs per bp, and one more factor of 2 because
- * the block may be dittoed with up to 3 DVAs by ddt_sync().
- */
- return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
+ if (lsize == 0)
+ return (0); /* No inflation needed */
+ return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
+}
+
+/*
+ * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%),
+ * or at least 128MB, unless that would cause it to be more than half the
+ * pool size.
+ *
+ * See the comment above spa_slop_shift for details.
+ */
+uint64_t
+spa_get_slop_space(spa_t *spa)
+{
+ uint64_t space = spa_get_dspace(spa);
+ return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}
uint64_t
return (spa->spa_dspace);
}
+uint64_t
+spa_get_checkpoint_space(spa_t *spa)
+{
+ return (spa->spa_checkpoint_info.sci_dspace);
+}
+
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa);
+ if (spa->spa_vdev_removal != NULL) {
+ /*
+ * We can't allocate from the removing device, so
+ * subtract its size. This prevents the DMU/DSL from
+ * filling up the (now smaller) pool while we are in the
+ * middle of removing the device.
+ *
+ * Note that the DMU/DSL doesn't actually know or care
+ * how much space is allocated (it does its own tracking
+ * of how much space has been logically used). So it
+ * doesn't matter that the data we are moving may be
+ * allocated twice (on the old device and the new
+ * device).
+ */
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ vdev_t *vd =
+ vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
+ spa->spa_dspace -= spa_deflate(spa) ?
+ vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+ }
}
/*
* Return the failure mode that has been set to this pool. The default
* behavior will be to block all I/Os when a complete failure occurs.
*/
-uint8_t
+uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
boolean_t
spa_suspended(spa_t *spa)
{
- return (spa->spa_suspended);
+ return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
return (spa->spa_log_class);
}
+metaslab_class_t *
+spa_special_class(spa_t *spa)
+{
+ return (spa->spa_special_class);
+}
+
+metaslab_class_t *
+spa_dedup_class(spa_t *spa)
+{
+ return (spa->spa_dedup_class);
+}
+
+/*
+ * Locate an appropriate allocation class
+ */
+metaslab_class_t *
+spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
+ uint_t level, uint_t special_smallblk)
+{
+ if (DMU_OT_IS_ZIL(objtype)) {
+ if (spa->spa_log_class->mc_groups != 0)
+ return (spa_log_class(spa));
+ else
+ return (spa_normal_class(spa));
+ }
+
+ boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
+
+ if (DMU_OT_IS_DDT(objtype)) {
+ if (spa->spa_dedup_class->mc_groups != 0)
+ return (spa_dedup_class(spa));
+ else if (has_special_class && zfs_ddt_data_is_special)
+ return (spa_special_class(spa));
+ else
+ return (spa_normal_class(spa));
+ }
+
+ /* Indirect blocks for user data can land in special if allowed */
+ if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
+ if (has_special_class && zfs_user_indirect_is_special)
+ return (spa_special_class(spa));
+ else
+ return (spa_normal_class(spa));
+ }
+
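+	/* Any other metadata or indirect blocks prefer the special class */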
+ if (DMU_OT_IS_METADATA(objtype) || level > 0) {
+ if (has_special_class)
+ return (spa_special_class(spa));
+ else
+ return (spa_normal_class(spa));
+ }
+
+ /*
+ * Allow small file blocks in special class in some cases (like
+ * for the dRAID vdev feature). But always leave a reserve of
+ * zfs_special_class_metadata_reserve_pct exclusively for metadata.
+ */
+ if (DMU_OT_IS_FILE(objtype) &&
+ has_special_class && size < special_smallblk) {
+ metaslab_class_t *special = spa_special_class(spa);
+ uint64_t alloc = metaslab_class_get_alloc(special);
+ uint64_t space = metaslab_class_get_space(special);
+ uint64_t limit =
+ (space * (100 - zfs_special_class_metadata_reserve_pct))
+ / 100;
+
+ if (alloc < limit)
+ return (special);
+ }
+
+ return (spa_normal_class(spa));
+}
+
+void
+spa_evicting_os_register(spa_t *spa, objset_t *os)
+{
+ mutex_enter(&spa->spa_evicting_os_lock);
+ list_insert_head(&spa->spa_evicting_os_list, os);
+ mutex_exit(&spa->spa_evicting_os_lock);
+}
+
+void
+spa_evicting_os_deregister(spa_t *spa, objset_t *os)
+{
+ mutex_enter(&spa->spa_evicting_os_lock);
+ list_remove(&spa->spa_evicting_os_list, os);
+ cv_broadcast(&spa->spa_evicting_os_cv);
+ mutex_exit(&spa->spa_evicting_os_lock);
+}
+
+void
+spa_evicting_os_wait(spa_t *spa)
+{
+ mutex_enter(&spa->spa_evicting_os_lock);
+ while (!list_is_empty(&spa->spa_evicting_os_list))
+ cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
+ mutex_exit(&spa->spa_evicting_os_lock);
+
+ dmu_buf_user_evict_wait();
+}
+
int
spa_max_replication(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
+uint64_t
+spa_deadman_synctime(spa_t *spa)
+{
+ return (spa->spa_deadman_synctime);
+}
+
+uint64_t
+spa_deadman_ziotime(spa_t *spa)
+{
+ return (spa->spa_deadman_ziotime);
+}
+
+uint64_t
+spa_get_deadman_failmode(spa_t *spa)
+{
+ return (spa->spa_deadman_failmode);
+}
+
+void
+spa_set_deadman_failmode(spa_t *spa, const char *failmode)
+{
+ if (strcmp(failmode, "wait") == 0)
+ spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
+ else if (strcmp(failmode, "continue") == 0)
+ spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
+ else if (strcmp(failmode, "panic") == 0)
+ spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
+ else
+ spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
+}
+
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
- dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
+ if (vd != NULL)
+ dsize = (asize >> SPA_MINBLOCKSHIFT) *
+ vd->vdev_deflate_ratio;
}
return (dsize);
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
- int d;
- for (d = 0; d < SPA_DVAS_PER_BP; d++)
+ for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
- int d;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
- for (d = 0; d < SPA_DVAS_PER_BP; d++)
+ for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
+uint64_t
+spa_dirty_data(spa_t *spa)
+{
+ return (spa->spa_dsl_pool->dp_dirty_total);
+}
+
/*
* ==========================================================================
* Initialization and Termination
int s;
s = strcmp(s1->spa_name, s2->spa_name);
- if (s > 0)
- return (1);
- if (s < 0)
- return (-1);
- return (0);
+
+ return (AVL_ISIGN(s));
}
void
spa_mode_global = mode;
+#ifndef _KERNEL
+ if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
+ struct sigaction sa;
+
+ sa.sa_flags = SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_sigaction = arc_buf_sigsegv;
+
+ if (sigaction(SIGSEGV, &sa, NULL) == -1) {
+ perror("could not enable watchpoints: "
+ "sigaction(SIGSEGV, ...) = ");
+ } else {
+ arc_watch = B_TRUE;
+ }
+ }
+#endif
+
fm_init();
refcount_init();
unique_init();
+ range_tree_init();
+ metaslab_alloc_trace_init();
+ ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_cache_stat_init();
+ vdev_mirror_stat_init();
+ vdev_raidz_math_init();
+ vdev_file_init();
zfs_prop_init();
zpool_prop_init();
+ zpool_feature_init();
spa_config_load();
l2arc_start();
+ scan_init();
+ qat_init();
}
void
spa_evict_all();
+ vdev_file_fini();
vdev_cache_stat_fini();
+ vdev_mirror_stat_fini();
+ vdev_raidz_math_fini();
zil_fini();
dmu_fini();
zio_fini();
+ ddt_fini();
+ metaslab_alloc_trace_fini();
+ range_tree_fini();
unique_fini();
refcount_fini();
fm_fini();
+ scan_fini();
+ qat_fini();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
boolean_t
spa_writeable(spa_t *spa)
{
- return (!!(spa->spa_mode & FWRITE));
+ return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
+}
+
+/*
+ * Returns true if there is a pending sync task in any of the current
+ * syncing txg, the current quiescing txg, or the current open txg.
+ */
+boolean_t
+spa_has_pending_synctask(spa_t *spa)
+{
+ return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
+ !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}
int
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
+ if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
+ spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
+ else
+ spa->spa_scan_pass_scrub_pause = 0;
+ spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
+ spa->spa_scan_pass_issued = 0;
vdev_scan_stat_init(spa->spa_root_vdev);
}
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
+ ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_to_process = scn->scn_phys.scn_to_process;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
- ps->pss_state = scn->scn_phys.scn_state;
/* data not stored on disk */
- ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_exam = spa->spa_scan_pass_exam;
+ ps->pss_pass_start = spa->spa_scan_pass_start;
+ ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
+ ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
+ ps->pss_pass_issued = spa->spa_scan_pass_issued;
+ ps->pss_issued =
+ scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
return (0);
}
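+
+/*
+ * Largest block size allocatable in this pool: SPA_MAXBLOCKSIZE (16M) when
+ * the large_blocks feature is enabled, SPA_OLD_MAXBLOCKSIZE (128K) otherwise.
+ */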
+int
+spa_maxblocksize(spa_t *spa)
+{
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
+ return (SPA_MAXBLOCKSIZE);
+ else
+ return (SPA_OLD_MAXBLOCKSIZE);
+}
+
+
+/*
+ * Returns the txg that the last device removal completed. No indirect mappings
+ * have been added since this txg.
+ */
+uint64_t
+spa_get_last_removal_txg(spa_t *spa)
+{
+ uint64_t vdevid;
+ uint64_t ret = -1ULL;
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ /*
+ * sr_prev_indirect_vdev is only modified while holding all the
+ * config locks, so it is sufficient to hold SCL_VDEV as reader when
+ * examining it.
+ */
+ vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
+
+ while (vdevid != -1ULL) {
+ vdev_t *vd = vdev_lookup_top(spa, vdevid);
+ vdev_indirect_births_t *vib = vd->vdev_indirect_births;
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ /*
+ * If the removal did not remap any data, we don't care.
+ */
+ if (vdev_indirect_births_count(vib) != 0) {
+ ret = vdev_indirect_births_last_entry_txg(vib);
+ break;
+ }
+
+ vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
+ }
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ IMPLY(ret != -1ULL,
+ spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+ return (ret);
+}
+
+int
+spa_maxdnodesize(spa_t *spa)
+{
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
+ return (DNODE_MAX_SIZE);
+ else
+ return (DNODE_MIN_SIZE);
+}
+
+boolean_t
+spa_multihost(spa_t *spa)
+{
+ return (spa->spa_multihost ? B_TRUE : B_FALSE);
+}
+
+unsigned long
+spa_get_hostid(void)
+{
+ unsigned long myhostid;
+
+#ifdef _KERNEL
+ myhostid = zone_get_hostid(NULL);
+#else /* _KERNEL */
+ /*
+ * We're emulating the system's hostid in userland, so
+ * we can't use zone_get_hostid().
+ */
+ (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
+#endif /* _KERNEL */
+
+ return (myhostid);
+}
+
boolean_t
-spa_debug_enabled(spa_t *spa)
+spa_trust_config(spa_t *spa)
+{
+ return (spa->spa_trust_config);
+}
+
+uint64_t
+spa_missing_tvds_allowed(spa_t *spa)
+{
+ return (spa->spa_missing_tvds_allowed);
+}
+
+void
+spa_set_missing_tvds(spa_t *spa, uint64_t missing)
+{
+ spa->spa_missing_tvds = missing;
+}
+
+/*
+ * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
+ */
+const char *
+spa_state_to_name(spa_t *spa)
+{
+ vdev_state_t state = spa->spa_root_vdev->vdev_state;
+ vdev_aux_t aux = spa->spa_root_vdev->vdev_stat.vs_aux;
+
+ if (spa_suspended(spa) &&
+ (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
+ return ("SUSPENDED");
+
+ switch (state) {
+ case VDEV_STATE_CLOSED:
+ case VDEV_STATE_OFFLINE:
+ return ("OFFLINE");
+ case VDEV_STATE_REMOVED:
+ return ("REMOVED");
+ case VDEV_STATE_CANT_OPEN:
+ if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
+ return ("FAULTED");
+ else if (aux == VDEV_AUX_SPLIT_POOL)
+ return ("SPLIT");
+ else
+ return ("UNAVAIL");
+ case VDEV_STATE_FAULTED:
+ return ("FAULTED");
+ case VDEV_STATE_DEGRADED:
+ return ("DEGRADED");
+ case VDEV_STATE_HEALTHY:
+ return ("ONLINE");
+ default:
+ break;
+ }
+
+ return ("UNKNOWN");
+}
+
+boolean_t
+spa_top_vdevs_spacemap_addressable(spa_t *spa)
+{
+ vdev_t *rvd = spa->spa_root_vdev;
+ for (uint64_t c = 0; c < rvd->vdev_children; c++) {
+ if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
+ return (B_FALSE);
+ }
+ return (B_TRUE);
+}
+
+boolean_t
+spa_has_checkpoint(spa_t *spa)
+{
+ return (spa->spa_checkpoint_txg != 0);
+}
+
+boolean_t
+spa_importing_readonly_checkpoint(spa_t *spa)
+{
+ return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
+ spa->spa_mode == FREAD);
+}
+
+uint64_t
+spa_min_claim_txg(spa_t *spa)
+{
+ uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
+
+ if (checkpoint_txg != 0)
+ return (checkpoint_txg + 1);
+
+ return (spa->spa_first_txg);
+}
+
+/*
+ * If there is a checkpoint, async destroys may consume more space from
+ * the pool instead of freeing it. In an attempt to save the pool from
+ * getting suspended when it is about to run out of space, we stop
+ * processing async destroys.
+ */
+boolean_t
+spa_suspend_async_destroy(spa_t *spa)
+{
+ dsl_pool_t *dp = spa_get_dsl(spa);
+
+ uint64_t unreserved = dsl_pool_unreserved_space(dp,
+ ZFS_SPACE_CHECK_EXTRA_RESERVED);
+ uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
+ uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
+
+ if (spa_has_checkpoint(spa) && avail == 0)
+ return (B_TRUE);
+
+ return (B_FALSE);
+}
+
+#if defined(_KERNEL)
+
+#include <linux/mod_compat.h>
+
+static int
+param_set_deadman_failmode(const char *val, zfs_kernel_param_t *kp)
{
- return (spa->spa_debug);
+ spa_t *spa = NULL;
+ char *p;
+
+ if (val == NULL)
+ return (SET_ERROR(-EINVAL));
+
+ if ((p = strchr(val, '\n')) != NULL)
+ *p = '\0';
+
+ if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
+	    strcmp(val, "panic") != 0)
+ return (SET_ERROR(-EINVAL));
+
+ if (spa_mode_global != 0) {
+ mutex_enter(&spa_namespace_lock);
+ while ((spa = spa_next(spa)) != NULL)
+ spa_set_deadman_failmode(spa, val);
+ mutex_exit(&spa_namespace_lock);
+ }
+
+ return (param_set_charp(val, kp));
+}
+
+static int
+param_set_deadman_ziotime(const char *val, zfs_kernel_param_t *kp)
+{
+ spa_t *spa = NULL;
+ int error;
+
+ error = param_set_ulong(val, kp);
+ if (error < 0)
+ return (SET_ERROR(error));
+
+ if (spa_mode_global != 0) {
+ mutex_enter(&spa_namespace_lock);
+ while ((spa = spa_next(spa)) != NULL)
+ spa->spa_deadman_ziotime =
+ MSEC2NSEC(zfs_deadman_ziotime_ms);
+ mutex_exit(&spa_namespace_lock);
+ }
+
+ return (0);
+}
+
+static int
+param_set_deadman_synctime(const char *val, zfs_kernel_param_t *kp)
+{
+ spa_t *spa = NULL;
+ int error;
+
+ error = param_set_ulong(val, kp);
+ if (error < 0)
+ return (SET_ERROR(error));
+
+ if (spa_mode_global != 0) {
+ mutex_enter(&spa_namespace_lock);
+ while ((spa = spa_next(spa)) != NULL)
+ spa->spa_deadman_synctime =
+ MSEC2NSEC(zfs_deadman_synctime_ms);
+ mutex_exit(&spa_namespace_lock);
+ }
+
+ return (0);
}
-#if defined(_KERNEL) && defined(HAVE_SPL)
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
-EXPORT_SYMBOL(spa_get_asize);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
+EXPORT_SYMBOL(spa_special_class);
+EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
+EXPORT_SYMBOL(spa_maxblocksize);
+EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_rename);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_get_random);
EXPORT_SYMBOL(spa_generate_guid);
-EXPORT_SYMBOL(sprintf_blkptr);
+EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
-
EXPORT_SYMBOL(spa_namespace_lock);
+EXPORT_SYMBOL(spa_trust_config);
+EXPORT_SYMBOL(spa_missing_tvds_allowed);
+EXPORT_SYMBOL(spa_set_missing_tvds);
+EXPORT_SYMBOL(spa_state_to_name);
+EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
+EXPORT_SYMBOL(spa_min_claim_txg);
+EXPORT_SYMBOL(spa_suspend_async_destroy);
+EXPORT_SYMBOL(spa_has_checkpoint);
+EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
+
+/* BEGIN CSTYLED */
+module_param(zfs_flags, uint, 0644);
+MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags");
module_param(zfs_recover, int, 0644);
MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors");
+
+module_param(zfs_free_leak_on_eio, int, 0644);
+MODULE_PARM_DESC(zfs_free_leak_on_eio,
+ "Set to ignore IO errors during free and permanently leak the space");
+
+module_param_call(zfs_deadman_synctime_ms, param_set_deadman_synctime,
+ param_get_ulong, &zfs_deadman_synctime_ms, 0644);
+MODULE_PARM_DESC(zfs_deadman_synctime_ms,
+ "Pool sync expiration time in milliseconds");
+
+module_param_call(zfs_deadman_ziotime_ms, param_set_deadman_ziotime,
+ param_get_ulong, &zfs_deadman_ziotime_ms, 0644);
+MODULE_PARM_DESC(zfs_deadman_ziotime_ms,
+ "IO expiration time in milliseconds");
+
+module_param(zfs_deadman_checktime_ms, ulong, 0644);
+MODULE_PARM_DESC(zfs_deadman_checktime_ms,
+ "Dead I/O check interval in milliseconds");
+
+module_param(zfs_deadman_enabled, int, 0644);
+MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer");
+
+module_param_call(zfs_deadman_failmode, param_set_deadman_failmode,
+ param_get_charp, &zfs_deadman_failmode, 0644);
+MODULE_PARM_DESC(zfs_deadman_failmode, "Failmode for deadman timer");
+
+module_param(spa_asize_inflation, int, 0644);
+MODULE_PARM_DESC(spa_asize_inflation,
+ "SPA size estimate multiplication factor");
+
+module_param(spa_slop_shift, int, 0644);
+MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool");
+
+module_param(zfs_ddt_data_is_special, int, 0644);
+MODULE_PARM_DESC(zfs_ddt_data_is_special,
+ "Place DDT data into the special class");
+
+module_param(zfs_user_indirect_is_special, int, 0644);
+MODULE_PARM_DESC(zfs_user_indirect_is_special,
+ "Place user data indirect blocks into the special class");
+/* END CSTYLED */
#endif