*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
+#include <sys/vdev_file.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
+#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
+#include <sys/kstat.h>
#include "zfs_prop.h"
+#include "zfeature_common.h"
/*
* SPA locking
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is always based on spa_namespace_lock and spa_config_lock[].
*
- * spa_rename() is also implemented within this file since is requires
+ * spa_rename() is also implemented within this file since it requires
* manipulation of the namespace.
*/
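/*
 * Editorial sketch (not part of this change): the typical reader-side use of
 * the config locks, as seen later in this file in bp_get_dsize(), pairs an
 * enter/exit around access to the vdev tree:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
 *	...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */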
int spa_mode_global;
#ifdef ZFS_DEBUG
-/* Everything except dprintf is on by default in debug builds */
-int zfs_flags = ~ZFS_DEBUG_DPRINTF;
+/* Everything except dprintf and spa is on by default in debug builds */
+int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
+ * This should only be used as a last resort, as it typically results
+ * in leaked space, or worse.
*/
-int zfs_recover = 0;
+int zfs_recover = B_FALSE;
+/*
+ * If destroy encounters an EIO while reading metadata (e.g. indirect
+ * blocks), space referenced by the missing metadata cannot be freed.
+ * Normally this causes the background destroy to become "stalled", as
+ * it is unable to make forward progress. While in this stalled state,
+ * all remaining space to free from the error-encountering filesystem is
+ * "temporarily leaked". Set this flag to cause it to ignore the EIO,
+ * permanently leak the space from indirect blocks that cannot be read,
+ * and continue to free everything else that it can.
+ *
+ * The default "stalling" behavior is useful if the storage partially
+ * fails (i.e. some but not all I/Os fail), and then later recovers. In
+ * this case, we will be able to continue pool operations while it is
+ * partially failed, and when it recovers, we can continue to free the
+ * space, with no leaks. However, note that this case is actually
+ * fairly rare.
+ *
+ * Typically pools either (a) fail completely (but perhaps temporarily,
+ * e.g. a top-level vdev going offline), or (b) have localized,
+ * permanent errors (e.g. disk returns the wrong data due to bit flip or
+ * firmware bug). In case (a), this setting does not matter because the
+ * pool will be suspended and the sync thread will not be able to make
+ * forward progress regardless. In case (b), because the error is
+ * permanent, the best we can do is leak the minimum amount of space,
+ * which is what setting this flag will do. Therefore, it is reasonable
+ * for this flag to normally be set, but we chose the more conservative
+ * approach of not setting it, so that there is no possibility of
+ * leaking space in the "partial temporary" failure case.
+ */
+int zfs_free_leak_on_eio = B_FALSE;
+
+/*
+ * Expiration time in milliseconds. This value has two meanings. First, it is
+ * used to determine when the spa_deadman() logic should fire. By default,
+ * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
+ * Second, the value determines whether an I/O is considered "hung". Any I/O
+ * that has not completed within zfs_deadman_synctime_ms is considered "hung",
+ * resulting in a system panic.
+ */
+unsigned long zfs_deadman_synctime_ms = 1000000ULL;
+
+/*
+ * By default the deadman is enabled.
+ */
+int zfs_deadman_enabled = 1;
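/*
 * Editorial usage sketch (hedged): both values are exported as module
 * parameters at the bottom of this file, so the deadman can be tuned or
 * disabled at module load time, e.g. hypothetically via /etc/modprobe.d:
 *
 *	options zfs zfs_deadman_synctime_ms=600000	# fire after 600 s
 *	options zfs zfs_deadman_enabled=0		# disable the deadman
 */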
+
+/*
+ * The worst case is single-sector max-parity RAID-Z blocks, in which
+ * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
+ * times the size; so just assume that. Add to this the fact that
+ * we can have up to 3 DVAs per bp, and one more factor of 2 because
+ * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
+ * the worst case is:
+ * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
+ */
+int spa_asize_inflation = 24;
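/*
 * Editorial worked example: with VDEV_RAIDZ_MAXPARITY == 3 and
 * SPA_DVAS_PER_BP == 3, the expression above is (3 + 1) * 3 * 2 == 24,
 * which is the default consumed by spa_get_asize() later in this file.
 */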
/*
* ==========================================================================
static void
spa_config_lock_init(spa_t *spa)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
- refcount_create(&scl->scl_count);
+ refcount_create_untracked(&scl->scl_count);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
}
static void
spa_config_lock_destroy(spa_t *spa)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
int wlocks_held = 0;
+ int i;
+
+ ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
- for (int i = 0; i < SCL_LOCKS; i++) {
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
- for (int i = SCL_LOCKS - 1; i >= 0; i--) {
+ int i;
+
+ for (i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
- int locks_held = 0;
+ int i, locks_held = 0;
- for (int i = 0; i < SCL_LOCKS; i++) {
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
- char c;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
+
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
- cp = strpbrk(name, "/@");
- if (cp) {
- c = *cp;
+ cp = strpbrk(search.spa_name, "/@#");
+ if (cp != NULL)
*cp = '\0';
- }
- (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
spa = avl_find(&spa_namespace_avl, &search, &where);
- if (cp)
- *cp = c;
-
return (spa);
}
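/*
 * Editorial example (hypothetical pool name): because the argument is copied
 * into search.spa_name and truncated at the first '/', '@' or '#', a lookup
 * on a full dataset, snapshot or bookmark name resolves to its pool, e.g.
 * with spa_namespace_lock held:
 *
 *	spa_lookup("tank/home@yesterday");	returns the spa_t for "tank"
 *	spa_lookup("tank#bookmark");		returns the spa_t for "tank"
 */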
+/*
+ * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
+ * If the zfs_deadman_enabled flag is set, it inspects all vdev queues
+ * looking for potentially hung I/Os.
+ */
+void
+spa_deadman(void *arg)
+{
+ spa_t *spa = arg;
+
+ zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
+ (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
+ ++spa->spa_deadman_calls);
+ if (zfs_deadman_enabled)
+ vdev_deadman(spa->spa_root_vdev);
+
+ spa->spa_deadman_tqid = taskq_dispatch_delay(system_taskq,
+ spa_deadman, spa, TQ_PUSHPAGE, ddi_get_lbolt() +
+ NSEC_TO_TICK(spa->spa_deadman_synctime));
+}
+
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
{
spa_t *spa;
spa_config_dirent_t *dp;
+ int t;
+ int i;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
- spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
+ spa = kmem_zalloc(sizeof (spa_t), KM_PUSHPAGE | KM_NODEBUG);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
- for (int t = 0; t < TXG_SIZE; t++)
+ for (t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
+ spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
+
refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
+ spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
- dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
+ dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_PUSHPAGE);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
- if (config != NULL)
+ VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
+ KM_PUSHPAGE) == 0);
+
+ if (config != NULL) {
+ nvlist_t *features;
+
+ if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
+ &features) == 0) {
+ VERIFY(nvlist_dup(features, &spa->spa_label_features,
+ 0) == 0);
+ }
+
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
+ }
+
+ if (spa->spa_label_features == NULL) {
+ VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
+ KM_PUSHPAGE) == 0);
+ }
+
+ spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
+
+ /*
+ * As a pool is being created, treat all features as disabled by
+ * setting SPA_FEATURE_DISABLED for all entries in the feature
+ * refcount cache.
+ */
+ for (i = 0; i < SPA_FEATURES; i++) {
+ spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
+ }
return (spa);
}
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
+ int t;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
list_destroy(&spa->spa_config_list);
+ nvlist_free(spa->spa_label_features);
+ nvlist_free(spa->spa_load_info);
spa_config_set(spa, NULL);
refcount_destroy(&spa->spa_refcount);
+ spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
- for (int t = 0; t < TXG_SIZE; t++)
+ for (t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
cv_destroy(&spa->spa_async_cv);
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
- aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
+ aux = kmem_zalloc(sizeof (spa_aux_t), KM_PUSHPAGE);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
- ASSERT(MUTEX_HELD(&spa_namespace_lock));
-
int config_changed = B_FALSE;
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
- /*
- * If the config changed, notify the scrub that it must restart.
- * This will initiate a resilver if needed.
- */
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
- ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
+ ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_ALL, spa);
* ==========================================================================
*/
+void
+spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
+{
+ if (!nvlist_exists(spa->spa_label_features, feature)) {
+ fnvlist_add_boolean(spa->spa_label_features, feature);
+ /*
+ * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
+ * dirty the vdev config because the SCL_CONFIG lock is not held.
+ * Thankfully, in this case we don't need to dirty the config
+ * because it will be written out anyway when we finish
+ * creating the pool.
+ */
+ if (tx->tx_txg != TXG_INITIAL)
+ vdev_config_dirty(spa->spa_root_vdev);
+ }
+}
+
+void
+spa_deactivate_mos_feature(spa_t *spa, const char *feature)
+{
+ if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
+ vdev_config_dirty(spa->spa_root_vdev);
+}
+
/*
* Rename a spa_t.
*/
}
/*
- * Determine whether a pool with given pool_guid exists. If device_guid is
- * non-zero, determine whether the pool exists *and* contains a device with the
- * specified device_guid.
+ * Return the spa_t associated with the given pool_guid, if it exists. If
+ * device_guid is non-zero, determine whether the pool exists *and* contains
+ * a device with the specified device_guid.
*/
-boolean_t
-spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
+spa_t *
+spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
}
}
- return (spa != NULL);
+ return (spa);
+}
+
+/*
+ * Determine whether a pool with the given pool_guid exists.
+ */
+boolean_t
+spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
+{
+ return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
char *new;
len = strlen(s);
- new = kmem_alloc(len + 1, KM_SLEEP);
+ new = kmem_alloc(len + 1, KM_PUSHPAGE);
bcopy(s, new, len);
new[len] = '\0';
}
void
-sprintf_blkptr(char *buf, const blkptr_t *bp)
+snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
- char *type = NULL;
+ char type[256];
char *checksum = NULL;
char *compress = NULL;
if (bp != NULL) {
- type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
- checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
+ if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
+ dmu_object_byteswap_t bswap =
+ DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
+ (void) snprintf(type, sizeof (type), "bswap %s %s",
+ DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
+ "metadata" : "data",
+ dmu_ot_byteswap[bswap].ob_name);
+ } else {
+ (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
+ sizeof (type));
+ }
+ if (!BP_IS_EMBEDDED(bp)) {
+ checksum =
+ zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
+ }
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
- SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
+ SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
+ compress);
}
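/*
 * Editorial usage sketch (hedged): callers now pass an explicit buffer
 * length, typically a stack buffer of BP_SPRINTF_LEN bytes, e.g.:
 *
 *	char blkbuf[BP_SPRINTF_LEN];
 *	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
 *	zfs_dbgmsg("block %s", blkbuf);
 */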
void
/*
* This is a stripped-down version of strtoull, suitable only for converting
- * lowercase hexidecimal numbers that don't overflow.
+ * lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
strtonum(const char *str, char **nptr)
return (spa->spa_dsl_pool);
}
+boolean_t
+spa_is_initializing(spa_t *spa)
+{
+ return (spa->spa_is_initializing);
+}
+
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
uint64_t
spa_guid(spa_t *spa)
{
+ dsl_pool_t *dp = spa_get_dsl(spa);
+ uint64_t guid;
+
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
- * vdev. We stash the original pool guid in 'spa_load_guid' to handle
+ * vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
- if (spa->spa_root_vdev != NULL)
+ if (spa->spa_root_vdev == NULL)
+ return (spa->spa_config_guid);
+
+ guid = spa->spa_last_synced_guid != 0 ?
+ spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
+
+ /*
+ * Return the most recently synced out guid unless we're
+ * in syncing context.
+ */
+ if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
- return (spa->spa_load_guid);
+ return (guid);
+}
+
+uint64_t
+spa_load_guid(spa_t *spa)
+{
+ /*
+ * This is a GUID that exists solely as a reference for the
+ * purposes of the arc. It is generated at load time, and
+ * is never written to persistent storage.
+ */
+ return (spa->spa_load_guid);
}
uint64_t
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
- /*
- * The worst case is single-sector max-parity RAID-Z blocks, in which
- * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
- * times the size; so just assume that. Add to this the fact that
- * we can have up to 3 DVAs per bp, and one more factor of 2 because
- * the block may be dittoed with up to 3 DVAs by ddt_sync().
- */
- return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
+ return (lsize * spa_asize_inflation);
}
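/*
 * Editorial worked example: for a 128 KiB logical write, the worst-case
 * reservation with the default spa_asize_inflation of 24 is
 * 128 KiB * 24 = 3 MiB; lowering the module parameter trades that safety
 * margin for less pessimistic space accounting.
 */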
uint64_t
return (spa->spa_prev_software_version);
}
+uint64_t
+spa_deadman_synctime(spa_t *spa)
+{
+ return (spa->spa_deadman_synctime);
+}
+
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
- dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
+ if (vd != NULL)
+ dsize = (asize >> SPA_MINBLOCKSHIFT) *
+ vd->vdev_deflate_ratio;
}
return (dsize);
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
+ int d;
- for (int d = 0; d < SPA_DVAS_PER_BP; d++)
+ for (d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
+ int d;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
- for (int d = 0; d < SPA_DVAS_PER_BP; d++)
+ for (d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (0);
}
-int
-spa_busy(void)
-{
- return (spa_active_count);
-}
-
void
-spa_boot_init()
+spa_boot_init(void)
{
spa_config_load();
}
spa_mode_global = mode;
+#ifndef _KERNEL
+ if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
+ struct sigaction sa;
+
+ sa.sa_flags = SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_sigaction = arc_buf_sigsegv;
+
+ if (sigaction(SIGSEGV, &sa, NULL) == -1) {
+ perror("could not enable watchpoints: "
+ "sigaction(SIGSEGV, ...) = ");
+ } else {
+ arc_watch = B_TRUE;
+ }
+ }
+#endif
+
+ fm_init();
refcount_init();
unique_init();
+ range_tree_init();
+ ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_cache_stat_init();
+ vdev_file_init();
zfs_prop_init();
zpool_prop_init();
+ zpool_feature_init();
spa_config_load();
l2arc_start();
}
spa_evict_all();
+ vdev_file_fini();
vdev_cache_stat_fini();
zil_fini();
dmu_fini();
zio_fini();
+ ddt_fini();
+ range_tree_fini();
unique_fini();
refcount_fini();
+ fm_fini();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
return (!!(spa->spa_mode & FWRITE));
}
+/*
+ * Returns true if there is a pending sync task in any of the current
+ * syncing txg, the current quiescing txg, or the current open txg.
+ */
+boolean_t
+spa_has_pending_synctask(spa_t *spa)
+{
+ return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
+}
+
int
spa_mode(spa_t *spa)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */
return (0);
}
+
+boolean_t
+spa_debug_enabled(spa_t *spa)
+{
+ return (spa->spa_debug);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+/* Namespace manipulation */
+EXPORT_SYMBOL(spa_lookup);
+EXPORT_SYMBOL(spa_add);
+EXPORT_SYMBOL(spa_remove);
+EXPORT_SYMBOL(spa_next);
+
+/* Refcount functions */
+EXPORT_SYMBOL(spa_open_ref);
+EXPORT_SYMBOL(spa_close);
+EXPORT_SYMBOL(spa_refcount_zero);
+
+/* Pool configuration lock */
+EXPORT_SYMBOL(spa_config_tryenter);
+EXPORT_SYMBOL(spa_config_enter);
+EXPORT_SYMBOL(spa_config_exit);
+EXPORT_SYMBOL(spa_config_held);
+
+/* Pool vdev add/remove lock */
+EXPORT_SYMBOL(spa_vdev_enter);
+EXPORT_SYMBOL(spa_vdev_exit);
+
+/* Pool vdev state change lock */
+EXPORT_SYMBOL(spa_vdev_state_enter);
+EXPORT_SYMBOL(spa_vdev_state_exit);
+
+/* Accessor functions */
+EXPORT_SYMBOL(spa_shutting_down);
+EXPORT_SYMBOL(spa_get_dsl);
+EXPORT_SYMBOL(spa_get_rootblkptr);
+EXPORT_SYMBOL(spa_set_rootblkptr);
+EXPORT_SYMBOL(spa_altroot);
+EXPORT_SYMBOL(spa_sync_pass);
+EXPORT_SYMBOL(spa_name);
+EXPORT_SYMBOL(spa_guid);
+EXPORT_SYMBOL(spa_last_synced_txg);
+EXPORT_SYMBOL(spa_first_txg);
+EXPORT_SYMBOL(spa_syncing_txg);
+EXPORT_SYMBOL(spa_version);
+EXPORT_SYMBOL(spa_state);
+EXPORT_SYMBOL(spa_load_state);
+EXPORT_SYMBOL(spa_freeze_txg);
+EXPORT_SYMBOL(spa_get_asize);
+EXPORT_SYMBOL(spa_get_dspace);
+EXPORT_SYMBOL(spa_update_dspace);
+EXPORT_SYMBOL(spa_deflate);
+EXPORT_SYMBOL(spa_normal_class);
+EXPORT_SYMBOL(spa_log_class);
+EXPORT_SYMBOL(spa_max_replication);
+EXPORT_SYMBOL(spa_prev_software_version);
+EXPORT_SYMBOL(spa_get_failmode);
+EXPORT_SYMBOL(spa_suspended);
+EXPORT_SYMBOL(spa_bootfs);
+EXPORT_SYMBOL(spa_delegation);
+EXPORT_SYMBOL(spa_meta_objset);
+
+/* Miscellaneous support routines */
+EXPORT_SYMBOL(spa_rename);
+EXPORT_SYMBOL(spa_guid_exists);
+EXPORT_SYMBOL(spa_strdup);
+EXPORT_SYMBOL(spa_strfree);
+EXPORT_SYMBOL(spa_get_random);
+EXPORT_SYMBOL(spa_generate_guid);
+EXPORT_SYMBOL(snprintf_blkptr);
+EXPORT_SYMBOL(spa_freeze);
+EXPORT_SYMBOL(spa_upgrade);
+EXPORT_SYMBOL(spa_evict_all);
+EXPORT_SYMBOL(spa_lookup_by_guid);
+EXPORT_SYMBOL(spa_has_spare);
+EXPORT_SYMBOL(dva_get_dsize_sync);
+EXPORT_SYMBOL(bp_get_dsize_sync);
+EXPORT_SYMBOL(bp_get_dsize);
+EXPORT_SYMBOL(spa_has_slogs);
+EXPORT_SYMBOL(spa_is_root);
+EXPORT_SYMBOL(spa_writeable);
+EXPORT_SYMBOL(spa_mode);
+
+EXPORT_SYMBOL(spa_namespace_lock);
+
+module_param(zfs_flags, uint, 0644);
+MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags");
+
+module_param(zfs_recover, int, 0644);
+MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors");
+
+module_param(zfs_free_leak_on_eio, int, 0644);
+MODULE_PARM_DESC(zfs_free_leak_on_eio,
+ "Set to ignore IO errors during free and permanently leak the space");
+
+module_param(zfs_deadman_synctime_ms, ulong, 0644);
+MODULE_PARM_DESC(zfs_deadman_synctime_ms, "Expiration time in milliseconds");
+
+module_param(zfs_deadman_enabled, int, 0644);
+MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer");
+
+module_param(spa_asize_inflation, int, 0644);
+MODULE_PARM_DESC(spa_asize_inflation,
+ "SPA size estimate multiplication factor");
+#endif