Pool allocation classes

diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 5e2af24e3b6d7240e5ba16c15373b6f9a4b6069f..2c500c010c35ceab917cf8434327bb343e515409 100644
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
  * Copyright 2013 Saso Kiselkov. All rights reserved.
  * Copyright (c) 2017 Datto Inc.
+ * Copyright (c) 2017, Intel Corporation.
  */
 
 #include <sys/zfs_context.h>
@@ -243,7 +244,12 @@ kmem_cache_t *spa_buffer_pool;
 int spa_mode_global;
 
 #ifdef ZFS_DEBUG
-int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR | ZFS_DEBUG_SPA);
+/*
+ * Everything except dprintf, set_error, and indirect_remap is on by
+ * default in debug builds.
+ */
+int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
+    ZFS_DEBUG_INDIRECT_REMAP);
 #else
 int zfs_flags = 0;
 #endif
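
A minimal sketch of what this default evaluates to, assuming illustrative
flag bit values (the real ones live in zfs_debug.h):

	#include <stdio.h>

	/* Bit positions are illustrative; see zfs_debug.h for the real ones. */
	enum {
		ZFS_DEBUG_DPRINTF = 1 << 0,
		ZFS_DEBUG_SET_ERROR = 1 << 9,
		ZFS_DEBUG_INDIRECT_REMAP = 1 << 10,
	};

	int
	main(void)
	{
		int flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
		    ZFS_DEBUG_INDIRECT_REMAP);

		/* Excluded bits read back as off, everything else as on. */
		printf("dprintf: %d\n", !!(flags & ZFS_DEBUG_DPRINTF));  /* 0 */
		printf("bit 1:   %d\n", !!(flags & (1 << 1)));           /* 1 */
		return (0);
	}
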
@@ -352,12 +358,15 @@ int spa_asize_inflation = 24;
  * These are the operations that call dsl_pool_adjustedsize() with the netfree
  * argument set to TRUE.
  *
+ * Operations that are almost guaranteed to free up space in the absence of
+ * a pool checkpoint can use up to three quarters of the slop space
+ * (e.g. "zfs destroy").
+ *
  * A very restricted set of operations are always permitted, regardless of
  * the amount of free space.  These are the operations that call
- * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
- * operations result in a net increase in the amount of space used,
- * it is possible to run the pool completely out of space, causing it to
- * be permanently read-only.
+ * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
+ * increase in the amount of space used, it is possible to run the pool
+ * completely out of space, causing it to be permanently read-only.
  *
  * Note that on very small pools, the slop space will be larger than
  * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
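
A sketch of the slop computation these comments describe (the in-tree
spa_get_slop_space() may differ in detail):

	/*
	 * Sketch only: 1/2^spa_slop_shift of the pool (1/32, ~3.2%, at the
	 * default shift of 5), floored at spa_min_slop (128MB).
	 */
	static uint64_t
	slop_space_sketch(uint64_t dspace)
	{
		uint64_t slop = dspace >> spa_slop_shift;

		return (slop > spa_min_slop ? slop : spa_min_slop);
	}
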
@@ -367,6 +376,51 @@ int spa_asize_inflation = 24;
  */
 int spa_slop_shift = 5;
 uint64_t spa_min_slop = 128 * 1024 * 1024;
+int spa_allocators = 4;
+
+
+/*PRINTFLIKE2*/
+void
+spa_load_failed(spa_t *spa, const char *fmt, ...)
+{
+       va_list adx;
+       char buf[256];
+
+       va_start(adx, fmt);
+       (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+       va_end(adx);
+
+       zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
+           spa->spa_trust_config ? "trusted" : "untrusted", buf);
+}
+
+/*PRINTFLIKE2*/
+void
+spa_load_note(spa_t *spa, const char *fmt, ...)
+{
+       va_list adx;
+       char buf[256];
+
+       va_start(adx, fmt);
+       (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+       va_end(adx);
+
+       zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
+           spa->spa_trust_config ? "trusted" : "untrusted", buf);
+}
+
+/*
+ * By default, dedup and user data indirect blocks land in the special class.
+ */
+int zfs_ddt_data_is_special = B_TRUE;
+int zfs_user_indirect_is_special = B_TRUE;
+
+/*
+ * The percentage of special class space reserved exclusively for metadata.
+ * Once (100 - zfs_special_class_metadata_reserve_pct) percent of the
+ * class's space has been allocated, only metadata may enter the class.
+ */
+int zfs_special_class_metadata_reserve_pct = 25;
 
 /*
  * ==========================================================================
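
Hypothetical call sites for the two logging helpers added above; both
forward the formatted message to zfs_dbgmsg(), tagged with the pool name
and whether the config is trusted (the message text here is illustrative):

	spa_load_note(spa, "LOADING");
	if (error != 0)
		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
		    error);
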
@@ -460,7 +514,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
-       ASSERT(wlocks_held <= locks);
+       ASSERT3U(wlocks_held, <=, locks);
 }
 
 void
@@ -586,7 +640,6 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
        mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
-       mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);
 
        cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
@@ -604,6 +657,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
        spa->spa_load_max_txg = UINT64_MAX;
        spa->spa_proc = &p0;
        spa->spa_proc_state = SPA_PROC_NONE;
+       spa->spa_trust_config = B_TRUE;
 
        spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
        spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
@@ -621,8 +675,16 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
        if (altroot)
                spa->spa_root = spa_strdup(altroot);
 
-       avl_create(&spa->spa_alloc_tree, zio_bookmark_compare,
-           sizeof (zio_t), offsetof(zio_t, io_alloc_node));
+       spa->spa_alloc_count = spa_allocators;
+       spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
+           sizeof (kmutex_t), KM_SLEEP);
+       spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
+           sizeof (avl_tree_t), KM_SLEEP);
+       for (int i = 0; i < spa->spa_alloc_count; i++) {
+               mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
+               avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
+                   sizeof (zio_t), offsetof(zio_t, io_alloc_node));
+       }
 
        /*
         * Every pool starts with the default cachefile
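
A simplified sketch (assumed, not the exact upstream hash) of how an
allocating I/O could be mapped onto one of the spa_alloc_count lock/tree
pairs created above, so that concurrent allocations contend on different
mutexes:

	/* Assumed sketch: upstream hashes the zio's bookmark to a bucket. */
	static int
	pick_allocator_sketch(spa_t *spa, const zbookmark_phys_t *zb)
	{
		uint64_t h = zb->zb_objset ^ (zb->zb_object * 2654435761ULL);

		return ((int)(h % spa->spa_alloc_count));
	}
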
@@ -654,8 +716,6 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
                    KM_SLEEP) == 0);
        }
 
-       spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
-
        spa->spa_min_ashift = INT_MAX;
        spa->spa_max_ashift = 0;
 
@@ -703,7 +763,15 @@ spa_remove(spa_t *spa)
                kmem_free(dp, sizeof (spa_config_dirent_t));
        }
 
-       avl_destroy(&spa->spa_alloc_tree);
+       for (int i = 0; i < spa->spa_alloc_count; i++) {
+               avl_destroy(&spa->spa_alloc_trees[i]);
+               mutex_destroy(&spa->spa_alloc_locks[i]);
+       }
+       kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
+           sizeof (kmutex_t));
+       kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
+           sizeof (avl_tree_t));
+
        list_destroy(&spa->spa_config_list);
 
        nvlist_free(spa->spa_label_features);
@@ -727,7 +795,6 @@ spa_remove(spa_t *spa)
        cv_destroy(&spa->spa_scrub_io_cv);
        cv_destroy(&spa->spa_suspend_cv);
 
-       mutex_destroy(&spa->spa_alloc_lock);
        mutex_destroy(&spa->spa_async_lock);
        mutex_destroy(&spa->spa_errlist_lock);
        mutex_destroy(&spa->spa_errlog_lock);
@@ -1106,6 +1173,8 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
         */
        ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
+       ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
+       ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
 
        spa_config_exit(spa, SCL_ALL, spa);
 
@@ -1136,7 +1205,7 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
         * If the config changed, update the config cache.
         */
        if (config_changed)
-               spa_config_sync(spa, B_FALSE, B_TRUE);
+               spa_write_cachefile(spa, B_FALSE, B_TRUE);
 }
 
 /*
@@ -1228,7 +1297,7 @@ spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
         */
        if (config_changed) {
                mutex_enter(&spa_namespace_lock);
-               spa_config_sync(spa, B_FALSE, B_TRUE);
+               spa_write_cachefile(spa, B_FALSE, B_TRUE);
                mutex_exit(&spa_namespace_lock);
        }
 
@@ -1306,7 +1375,7 @@ spa_rename(const char *name, const char *newname)
        /*
         * Sync the updated config cache.
         */
-       spa_config_sync(spa, B_FALSE, B_TRUE);
+       spa_write_cachefile(spa, B_FALSE, B_TRUE);
 
        spa_close(spa, FTAG);
 
@@ -1421,7 +1490,6 @@ snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
        char type[256];
        char *checksum = NULL;
        char *compress = NULL;
-       char *crypt_type = NULL;
 
        if (bp != NULL) {
                if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
@@ -1435,15 +1503,6 @@ snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
                        (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
                            sizeof (type));
                }
-               if (BP_IS_ENCRYPTED(bp)) {
-                       crypt_type = "encrypted";
-               } else if (BP_IS_AUTHENTICATED(bp)) {
-                       crypt_type = "authenticated";
-               } else if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
-                       crypt_type = "indirect-MAC";
-               } else {
-                       crypt_type = "unencrypted";
-               }
                if (!BP_IS_EMBEDDED(bp)) {
                        checksum =
                            zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
@@ -1452,7 +1511,7 @@ snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
        }
 
        SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
-           crypt_type, compress);
+           compress);
 }
 
 void
@@ -1511,6 +1570,16 @@ zfs_strtonum(const char *str, char **nptr)
        return (val);
 }
 
+void
+spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
+{
+       /*
+        * We bump the feature refcount for each special vdev added to the pool.
+        */
+       ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
+       spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
+}
+
 /*
  * ==========================================================================
  * Accessor functions
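
A hypothetical call-site sketch for spa_activate_allocation_classes():
the refcount is bumped from syncing context when an allocation-class
vdev is added (vdev_alloc_bias and VDEV_BIAS_NONE are assumptions here):

	/* Hypothetical sketch only. */
	if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
	    spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES))
		spa_activate_allocation_classes(spa, tx);
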
@@ -1535,6 +1604,12 @@ spa_is_initializing(spa_t *spa)
        return (spa->spa_is_initializing);
 }
 
+boolean_t
+spa_indirect_vdevs_loaded(spa_t *spa)
+{
+       return (spa->spa_indirect_vdevs_loaded);
+}
+
 blkptr_t *
 spa_get_rootblkptr(spa_t *spa)
 {
@@ -1688,11 +1763,38 @@ spa_get_dspace(spa_t *spa)
        return (spa->spa_dspace);
 }
 
+uint64_t
+spa_get_checkpoint_space(spa_t *spa)
+{
+       return (spa->spa_checkpoint_info.sci_dspace);
+}
+
 void
 spa_update_dspace(spa_t *spa)
 {
        spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
            ddt_get_dedup_dspace(spa);
+       if (spa->spa_vdev_removal != NULL) {
+               /*
+                * We can't allocate from the removing device, so
+                * subtract its size.  This prevents the DMU/DSL from
+                * filling up the (now smaller) pool while we are in the
+                * middle of removing the device.
+                *
+                * Note that the DMU/DSL doesn't actually know or care
+                * how much space is allocated (it does its own tracking
+                * of how much space has been logically used).  So it
+                * doesn't matter that the data we are moving may be
+                * allocated twice (on the old device and the new
+                * device).
+                */
+               spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+               vdev_t *vd =
+                   vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
+               spa->spa_dspace -= spa_deflate(spa) ?
+                   vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
+               spa_config_exit(spa, SCL_VDEV, FTAG);
+       }
 }
 
 /*
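
A worked example of the subtraction above, with hypothetical sizes:

	uint64_t dspace = 10ULL << 40;	/* 10 TiB pool (hypothetical)  */
	dspace -= 2ULL << 40;		/* 2 TiB vdev being removed    */
	/* The DMU/DSL now sees 8 TiB until the removal completes. */
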
@@ -1735,6 +1837,79 @@ spa_log_class(spa_t *spa)
        return (spa->spa_log_class);
 }
 
+metaslab_class_t *
+spa_special_class(spa_t *spa)
+{
+       return (spa->spa_special_class);
+}
+
+metaslab_class_t *
+spa_dedup_class(spa_t *spa)
+{
+       return (spa->spa_dedup_class);
+}
+
+/*
+ * Locate an appropriate allocation class
+ */
+metaslab_class_t *
+spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
+    uint_t level, uint_t special_smallblk)
+{
+       if (DMU_OT_IS_ZIL(objtype)) {
+               if (spa->spa_log_class->mc_groups != 0)
+                       return (spa_log_class(spa));
+               else
+                       return (spa_normal_class(spa));
+       }
+
+       boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
+
+       if (DMU_OT_IS_DDT(objtype)) {
+               if (spa->spa_dedup_class->mc_groups != 0)
+                       return (spa_dedup_class(spa));
+               else if (has_special_class && zfs_ddt_data_is_special)
+                       return (spa_special_class(spa));
+               else
+                       return (spa_normal_class(spa));
+       }
+
+       /* Indirect blocks for user data can land in special if allowed */
+       if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
+               if (has_special_class && zfs_user_indirect_is_special)
+                       return (spa_special_class(spa));
+               else
+                       return (spa_normal_class(spa));
+       }
+
+       if (DMU_OT_IS_METADATA(objtype) || level > 0) {
+               if (has_special_class)
+                       return (spa_special_class(spa));
+               else
+                       return (spa_normal_class(spa));
+       }
+
+       /*
+        * Allow small file blocks in special class in some cases (like
+        * for the dRAID vdev feature). But always leave a reserve of
+        * zfs_special_class_metadata_reserve_pct exclusively for metadata.
+        */
+       if (DMU_OT_IS_FILE(objtype) &&
+           has_special_class && size < special_smallblk) {
+               metaslab_class_t *special = spa_special_class(spa);
+               uint64_t alloc = metaslab_class_get_alloc(special);
+               uint64_t space = metaslab_class_get_space(special);
+               uint64_t limit =
+                   (space * (100 - zfs_special_class_metadata_reserve_pct))
+                   / 100;
+
+               if (alloc < limit)
+                       return (special);
+       }
+
+       return (spa_normal_class(spa));
+}
+
 void
 spa_evicting_os_register(spa_t *spa, objset_t *os)
 {
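
A worked example of the small-block reserve check in spa_preferred_class()
above, using hypothetical sizes and the default 25% reserve:

	uint64_t space = 1000ULL << 30;			/* 1000 GiB class  */
	uint64_t alloc = 800ULL << 30;			/* 800 GiB in use  */
	uint64_t limit = (space * (100 - 25)) / 100;	/* 750 GiB         */
	/*
	 * alloc >= limit, so small file blocks fall back to the normal
	 * class; metadata may still be placed in the special class.
	 */
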
@@ -1857,6 +2032,12 @@ bp_get_dsize(spa_t *spa, const blkptr_t *bp)
        return (dsize);
 }
 
+uint64_t
+spa_dirty_data(spa_t *spa)
+{
+       return (spa->spa_dsl_pool->dp_dirty_total);
+}
+
 /*
  * ==========================================================================
  * Initialization and Termination
@@ -2004,7 +2185,7 @@ spa_is_root(spa_t *spa)
 boolean_t
 spa_writeable(spa_t *spa)
 {
-       return (!!(spa->spa_mode & FWRITE));
+       return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
 }
 
 /*
@@ -2014,7 +2195,8 @@ spa_writeable(spa_t *spa)
 boolean_t
 spa_has_pending_synctask(spa_t *spa)
 {
-       return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
+       return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
+           !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
 }
 
 int
@@ -2100,12 +2282,6 @@ spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
        return (0);
 }
 
-boolean_t
-spa_debug_enabled(spa_t *spa)
-{
-       return (spa->spa_debug);
-}
-
 int
 spa_maxblocksize(spa_t *spa)
 {
@@ -2115,6 +2291,49 @@ spa_maxblocksize(spa_t *spa)
                return (SPA_OLD_MAXBLOCKSIZE);
 }
 
+
+/*
+ * Returns the txg that the last device removal completed. No indirect mappings
+ * have been added since this txg.
+ */
+uint64_t
+spa_get_last_removal_txg(spa_t *spa)
+{
+       uint64_t vdevid;
+       uint64_t ret = -1ULL;
+
+       spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+       /*
+        * sr_prev_indirect_vdev is only modified while holding all the
+        * config locks, so it is sufficient to hold SCL_VDEV as reader when
+        * examining it.
+        */
+       vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
+
+       while (vdevid != -1ULL) {
+               vdev_t *vd = vdev_lookup_top(spa, vdevid);
+               vdev_indirect_births_t *vib = vd->vdev_indirect_births;
+
+               ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+               /*
+                * If the removal did not remap any data, we don't care.
+                */
+               if (vdev_indirect_births_count(vib) != 0) {
+                       ret = vdev_indirect_births_last_entry_txg(vib);
+                       break;
+               }
+
+               vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
+       }
+       spa_config_exit(spa, SCL_VDEV, FTAG);
+
+       IMPLY(ret != -1ULL,
+           spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+       return (ret);
+}
+
 int
 spa_maxdnodesize(spa_t *spa)
 {
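
A hypothetical consumer sketch: a return value of -1ULL means no removal
has completed, so no block can have been remapped (birth_txg is assumed):

	uint64_t last_removal_txg = spa_get_last_removal_txg(spa);

	if (last_removal_txg != -1ULL && birth_txg <= last_removal_txg) {
		/* the block may have been remapped by a device removal */
	}
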
@@ -2148,7 +2367,121 @@ spa_get_hostid(void)
        return (myhostid);
 }
 
-#if defined(_KERNEL) && defined(HAVE_SPL)
+boolean_t
+spa_trust_config(spa_t *spa)
+{
+       return (spa->spa_trust_config);
+}
+
+uint64_t
+spa_missing_tvds_allowed(spa_t *spa)
+{
+       return (spa->spa_missing_tvds_allowed);
+}
+
+void
+spa_set_missing_tvds(spa_t *spa, uint64_t missing)
+{
+       spa->spa_missing_tvds = missing;
+}
+
+/*
+ * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc.).
+ */
+const char *
+spa_state_to_name(spa_t *spa)
+{
+       vdev_state_t state = spa->spa_root_vdev->vdev_state;
+       vdev_aux_t aux = spa->spa_root_vdev->vdev_stat.vs_aux;
+
+       if (spa_suspended(spa) &&
+           (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
+               return ("SUSPENDED");
+
+       switch (state) {
+       case VDEV_STATE_CLOSED:
+       case VDEV_STATE_OFFLINE:
+               return ("OFFLINE");
+       case VDEV_STATE_REMOVED:
+               return ("REMOVED");
+       case VDEV_STATE_CANT_OPEN:
+               if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
+                       return ("FAULTED");
+               else if (aux == VDEV_AUX_SPLIT_POOL)
+                       return ("SPLIT");
+               else
+                       return ("UNAVAIL");
+       case VDEV_STATE_FAULTED:
+               return ("FAULTED");
+       case VDEV_STATE_DEGRADED:
+               return ("DEGRADED");
+       case VDEV_STATE_HEALTHY:
+               return ("ONLINE");
+       default:
+               break;
+       }
+
+       return ("UNKNOWN");
+}
+
+boolean_t
+spa_top_vdevs_spacemap_addressable(spa_t *spa)
+{
+       vdev_t *rvd = spa->spa_root_vdev;
+       for (uint64_t c = 0; c < rvd->vdev_children; c++) {
+               if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
+                       return (B_FALSE);
+       }
+       return (B_TRUE);
+}
+
+boolean_t
+spa_has_checkpoint(spa_t *spa)
+{
+       return (spa->spa_checkpoint_txg != 0);
+}
+
+boolean_t
+spa_importing_readonly_checkpoint(spa_t *spa)
+{
+       return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
+           spa->spa_mode == FREAD);
+}
+
+uint64_t
+spa_min_claim_txg(spa_t *spa)
+{
+       uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
+
+       if (checkpoint_txg != 0)
+               return (checkpoint_txg + 1);
+
+       return (spa->spa_first_txg);
+}
+
+/*
+ * If there is a checkpoint, async destroys may consume more space from
+ * the pool instead of freeing it. In an attempt to save the pool from
+ * getting suspended when it is about to run out of space, we stop
+ * processing async destroys.
+ */
+boolean_t
+spa_suspend_async_destroy(spa_t *spa)
+{
+       dsl_pool_t *dp = spa_get_dsl(spa);
+
+       uint64_t unreserved = dsl_pool_unreserved_space(dp,
+           ZFS_SPACE_CHECK_EXTRA_RESERVED);
+       uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
+       uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
+
+       if (spa_has_checkpoint(spa) && avail == 0)
+               return (B_TRUE);
+
+       return (B_FALSE);
+}
+
+#if defined(_KERNEL)
 
 #include <linux/mod_compat.h>
 
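
A sketch of the checkpoint interaction in spa_min_claim_txg() above, with
a hypothetical checkpoint txg:

	/* Hypothetical: a checkpoint was taken at txg 1000. */
	uint64_t min_txg = spa_min_claim_txg(spa);	/* 1000 + 1 = 1001 */
	/*
	 * ZIL claiming must not touch blocks born at or before the
	 * checkpoint, so claims are restricted to txg >= min_txg.
	 */
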
@@ -2168,14 +2501,58 @@ param_set_deadman_failmode(const char *val, zfs_kernel_param_t *kp)
            strcmp(val, "panic"))
                return (SET_ERROR(-EINVAL));
 
-       mutex_enter(&spa_namespace_lock);
-       while ((spa = spa_next(spa)) != NULL)
-               spa_set_deadman_failmode(spa, val);
-       mutex_exit(&spa_namespace_lock);
+       if (spa_mode_global != 0) {
+               mutex_enter(&spa_namespace_lock);
+               while ((spa = spa_next(spa)) != NULL)
+                       spa_set_deadman_failmode(spa, val);
+               mutex_exit(&spa_namespace_lock);
+       }
 
        return (param_set_charp(val, kp));
 }
 
+static int
+param_set_deadman_ziotime(const char *val, zfs_kernel_param_t *kp)
+{
+       spa_t *spa = NULL;
+       int error;
+
+       error = param_set_ulong(val, kp);
+       if (error < 0)
+               return (SET_ERROR(error));
+
+       if (spa_mode_global != 0) {
+               mutex_enter(&spa_namespace_lock);
+               while ((spa = spa_next(spa)) != NULL)
+                       spa->spa_deadman_ziotime =
+                           MSEC2NSEC(zfs_deadman_ziotime_ms);
+               mutex_exit(&spa_namespace_lock);
+       }
+
+       return (0);
+}
+
+static int
+param_set_deadman_synctime(const char *val, zfs_kernel_param_t *kp)
+{
+       spa_t *spa = NULL;
+       int error;
+
+       error = param_set_ulong(val, kp);
+       if (error < 0)
+               return (SET_ERROR(error));
+
+       if (spa_mode_global != 0) {
+               mutex_enter(&spa_namespace_lock);
+               while ((spa = spa_next(spa)) != NULL)
+                       spa->spa_deadman_synctime =
+                           MSEC2NSEC(zfs_deadman_synctime_ms);
+               mutex_exit(&spa_namespace_lock);
+       }
+
+       return (0);
+}
+
 /* Namespace manipulation */
 EXPORT_SYMBOL(spa_lookup);
 EXPORT_SYMBOL(spa_add);
@@ -2222,6 +2599,8 @@ EXPORT_SYMBOL(spa_update_dspace);
 EXPORT_SYMBOL(spa_deflate);
 EXPORT_SYMBOL(spa_normal_class);
 EXPORT_SYMBOL(spa_log_class);
+EXPORT_SYMBOL(spa_special_class);
+EXPORT_SYMBOL(spa_preferred_class);
 EXPORT_SYMBOL(spa_max_replication);
 EXPORT_SYMBOL(spa_prev_software_version);
 EXPORT_SYMBOL(spa_get_failmode);
@@ -2253,6 +2632,15 @@ EXPORT_SYMBOL(spa_is_root);
 EXPORT_SYMBOL(spa_writeable);
 EXPORT_SYMBOL(spa_mode);
 EXPORT_SYMBOL(spa_namespace_lock);
+EXPORT_SYMBOL(spa_trust_config);
+EXPORT_SYMBOL(spa_missing_tvds_allowed);
+EXPORT_SYMBOL(spa_set_missing_tvds);
+EXPORT_SYMBOL(spa_state_to_name);
+EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
+EXPORT_SYMBOL(spa_min_claim_txg);
+EXPORT_SYMBOL(spa_suspend_async_destroy);
+EXPORT_SYMBOL(spa_has_checkpoint);
+EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
 
 /* BEGIN CSTYLED */
 module_param(zfs_flags, uint, 0644);
@@ -2265,11 +2653,13 @@ module_param(zfs_free_leak_on_eio, int, 0644);
 MODULE_PARM_DESC(zfs_free_leak_on_eio,
        "Set to ignore IO errors during free and permanently leak the space");
 
-module_param(zfs_deadman_synctime_ms, ulong, 0644);
+module_param_call(zfs_deadman_synctime_ms, param_set_deadman_synctime,
+    param_get_ulong, &zfs_deadman_synctime_ms, 0644);
 MODULE_PARM_DESC(zfs_deadman_synctime_ms,
        "Pool sync expiration time in milliseconds");
 
-module_param(zfs_deadman_ziotime_ms, ulong, 0644);
+module_param_call(zfs_deadman_ziotime_ms, param_set_deadman_ziotime,
+    param_get_ulong, &zfs_deadman_ziotime_ms, 0644);
 MODULE_PARM_DESC(zfs_deadman_ziotime_ms,
        "IO expiration time in milliseconds");
 
@@ -2290,5 +2680,13 @@ MODULE_PARM_DESC(spa_asize_inflation,
 
 module_param(spa_slop_shift, int, 0644);
 MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool");
+
+module_param(zfs_ddt_data_is_special, int, 0644);
+MODULE_PARM_DESC(zfs_ddt_data_is_special,
+       "Place DDT data into the special class");
+
+module_param(zfs_user_indirect_is_special, int, 0644);
+MODULE_PARM_DESC(zfs_user_indirect_is_special,
+       "Place user data indirect blocks into the special class");
 /* END CSTYLED */
 #endif