Sequential scrub and resilvers
diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c
index 6412a8e935b9355d0e978d5c2a2ab45582493a9d..440b2979960e85666a005c0cb8546cd7b99603e7 100644
--- a/cmd/zpool/zpool_main.c
+++ b/cmd/zpool/zpool_main.c
@@ -26,6 +26,9 @@
  * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
  * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
  * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
+ * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
+ * Copyright (c) 2017 Datto Inc.
+ * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
  */
 
 #include <assert.h>
 #include <string.h>
 #include <strings.h>
 #include <unistd.h>
-#include <priv.h>
 #include <pwd.h>
 #include <zone.h>
+#include <sys/wait.h>
 #include <zfs_prop.h>
 #include <sys/fs/zfs.h>
 #include <sys/stat.h>
+#include <sys/systeminfo.h>
+#include <sys/fm/fs/zfs.h>
 #include <sys/fm/util.h>
 #include <sys/fm/protocol.h>
 #include <sys/zfs_ioctl.h>
+#include <sys/mount.h>
+#include <sys/sysmacros.h>
+
 #include <math.h>
 
 #include <libzfs.h>
@@ -97,6 +105,8 @@ static int zpool_do_events(int, char **);
 static int zpool_do_get(int, char **);
 static int zpool_do_set(int, char **);
 
+static int zpool_do_sync(int, char **);
+
 /*
  * These libumem hooks provide a reasonable set of defaults for the allocator's
  * debugging facilities.
@@ -140,6 +150,7 @@ typedef enum {
        HELP_GET,
        HELP_SET,
        HELP_SPLIT,
+       HELP_SYNC,
        HELP_REGUID,
        HELP_REOPEN
 } zpool_help_t;
@@ -153,6 +164,7 @@ enum iostat_type {
        IOS_LATENCY = 1,
        IOS_QUEUES = 2,
        IOS_L_HISTO = 3,
+       IOS_RQ_HISTO = 4,
        IOS_COUNT,      /* always last element */
 };
 
@@ -161,6 +173,62 @@ enum iostat_type {
 #define        IOS_LATENCY_M   (1ULL << IOS_LATENCY)
 #define        IOS_QUEUES_M    (1ULL << IOS_QUEUES)
 #define        IOS_L_HISTO_M   (1ULL << IOS_L_HISTO)
+#define        IOS_RQ_HISTO_M  (1ULL << IOS_RQ_HISTO)
+
+/* Mask of all the histo bits */
+#define        IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
+
+/*
+ * Lookup table for iostat flags to nvlist names.  Basically a list
+ * of all the nvlists a flag requires.  Also specifies the order in
+ * which data gets printed in zpool iostat.
+ */
+static const char *vsx_type_to_nvlist[IOS_COUNT][11] = {
+       [IOS_L_HISTO] = {
+           ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
+           NULL},
+       [IOS_LATENCY] = {
+           ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
+           NULL},
+       [IOS_QUEUES] = {
+           ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
+           ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
+           ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
+           ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
+           ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
+           NULL},
+       [IOS_RQ_HISTO] = {
+           ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
+           ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
+           ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
+           ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
+           ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
+           ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
+           ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
+           ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
+           ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
+           ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
+           NULL},
+};
+
+
+/*
+ * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
+ * Right now, only one histo bit is ever set at one time, so we can
+ * just do a highbit64(a)
+ */
+#define        IOS_HISTO_IDX(a)        (highbit64(a & IOS_ANYHISTO_M) - 1)
 
 typedef struct zpool_command {
        const char      *name;
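The arithmetic in IOS_HISTO_IDX() is easiest to follow with concrete numbers. The standalone sketch below uses a local stand-in for highbit64() (assumed, as in the ZFS sources, to return the 1-based position of the highest set bit) to map IOS_RQ_HISTO_M back to index 4, i.e. IOS_RQ_HISTO, which can then be used as a row index into vsx_type_to_nvlist:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the ZFS highbit64(): 1-based index of the highest set bit. */
static int
highbit64(uint64_t i)
{
	int h = 0;

	while (i != 0) {
		h++;
		i >>= 1;
	}
	return (h);
}

int
main(void)
{
	uint64_t flags = 1ULL << 4;			/* IOS_RQ_HISTO_M */
	uint64_t anyhisto = (1ULL << 3) | (1ULL << 4);	/* IOS_ANYHISTO_M */

	/* highbit64(0x10) == 5, so the index is 5 - 1 == 4 == IOS_RQ_HISTO */
	(void) printf("histo index = %d\n", highbit64(flags & anyhisto) - 1);
	return (0);
}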
@@ -212,6 +280,7 @@ static zpool_command_t command_table[] = {
        { NULL },
        { "get",        zpool_do_get,           HELP_GET                },
        { "set",        zpool_do_set,           HELP_SET                },
+       { "sync",       zpool_do_sync,          HELP_SYNC               },
 };
 
 #define        NCOMMAND        (ARRAY_SIZE(command_table))
@@ -222,7 +291,8 @@ static boolean_t log_history = B_TRUE;
 static uint_t timestamp_fmt = NODATE;
 
 static const char *
-get_usage(zpool_help_t idx) {
+get_usage(zpool_help_t idx)
+{
        switch (idx) {
        case HELP_ADD:
                return (gettext("\tadd [-fgLnP] [-o property=value] "
@@ -246,25 +316,27 @@ get_usage(zpool_help_t idx) {
                return (gettext("\thistory [-il] [<pool>] ...\n"));
        case HELP_IMPORT:
                return (gettext("\timport [-d dir] [-D]\n"
-                   "\timport [-d dir | -c cachefile] [-F [-n]] <pool | id>\n"
+                   "\timport [-d dir | -c cachefile] [-F [-n]] [-l] "
+                   "<pool | id>\n"
                    "\timport [-o mntopts] [-o property=value] ... \n"
-                   "\t    [-d dir | -c cachefile] [-D] [-f] [-m] [-N] "
+                   "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
                    "[-R root] [-F [-n]] -a\n"
                    "\timport [-o mntopts] [-o property=value] ... \n"
-                   "\t    [-d dir | -c cachefile] [-D] [-f] [-m] [-N] "
+                   "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
                    "[-R root] [-F [-n]]\n"
                    "\t    <pool | id> [newpool]\n"));
        case HELP_IOSTAT:
-               return (gettext("\tiostat [-T d | u] [-ghHLpPvy] [[-lq]|-w]\n"
-                   "\t    [[pool ...]|[pool vdev ...]|[vdev ...]] "
-                   "[interval [count]]\n"));
+               return (gettext("\tiostat [[[-c [script1,script2,...]"
+                   "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
+                   "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
+                   " [interval [count]]\n"));
        case HELP_LABELCLEAR:
                return (gettext("\tlabelclear [-f] <vdev>\n"));
        case HELP_LIST:
                return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
                    "[-T d|u] [pool] ... [interval [count]]\n"));
        case HELP_OFFLINE:
-               return (gettext("\toffline [-t] <pool> <device> ...\n"));
+               return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
        case HELP_ONLINE:
                return (gettext("\tonline <pool> <device> ...\n"));
        case HELP_REPLACE:
@@ -273,29 +345,31 @@ get_usage(zpool_help_t idx) {
        case HELP_REMOVE:
                return (gettext("\tremove <pool> <device> ...\n"));
        case HELP_REOPEN:
-               return (gettext("\treopen <pool>\n"));
+               return (gettext("\treopen [-n] <pool>\n"));
        case HELP_SCRUB:
-               return (gettext("\tscrub [-s] <pool> ...\n"));
+               return (gettext("\tscrub [-s | -p] <pool> ...\n"));
        case HELP_STATUS:
-               return (gettext("\tstatus [-gLPvxD] [-T d|u] [pool] ... "
-                   "[interval [count]]\n"));
+               return (gettext("\tstatus [-c [script1,script2,...]] [-gLPvxD]"
+                   "[-T d|u] [pool] ... [interval [count]]\n"));
        case HELP_UPGRADE:
                return (gettext("\tupgrade\n"
                    "\tupgrade -v\n"
                    "\tupgrade [-V version] <-a | pool ...>\n"));
        case HELP_EVENTS:
-               return (gettext("\tevents [-vHfc]\n"));
+               return (gettext("\tevents [-vHf [pool] | -c]\n"));
        case HELP_GET:
                return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
                    "<\"all\" | property[,...]> <pool> ...\n"));
        case HELP_SET:
                return (gettext("\tset <property=value> <pool> \n"));
        case HELP_SPLIT:
-               return (gettext("\tsplit [-gLnP] [-R altroot] [-o mntopts]\n"
+               return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
                    "\t    [-o property=value] <pool> <newpool> "
                    "[<device> ...]\n"));
        case HELP_REGUID:
                return (gettext("\treguid <pool>\n"));
+       case HELP_SYNC:
+               return (gettext("\tsync [pool] ...\n"));
        }
 
        abort();
@@ -616,6 +690,20 @@ zpool_do_add(int argc, char **argv)
                return (1);
        }
 
+       /* unless manually specified use "ashift" pool property (if set) */
+       if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
+               int intval;
+               zprop_source_t src;
+               char strval[ZPOOL_MAXPROPLEN];
+
+               intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
+               if (src != ZPROP_SRC_DEFAULT) {
+                       (void) sprintf(strval, "%" PRId32, intval);
+                       verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
+                           &props, B_TRUE) == 0);
+               }
+       }
+
        /* pass off to get_vdev_spec for processing */
        nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
            argc, argv);
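The ashift handling above follows a check-before-add pattern: inherit the pool's current ashift for the new vdevs only when the user did not pass one explicitly. Below is a minimal libnvpair sketch of that pattern, with illustrative values and plain nvlist calls standing in for add_prop_list():

#include <stdio.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *props = NULL;
	char *val;

	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
		return (1);

	/* Simulate "-o ashift=12" having already been parsed into props. */
	(void) nvlist_add_string(props, "ashift", "12");

	/* Inherit the pool's ashift only when the user did not set one. */
	if (!nvlist_exists(props, "ashift"))
		(void) nvlist_add_string(props, "ashift", "9");

	if (nvlist_lookup_string(props, "ashift", &val) == 0)
		(void) printf("ashift=%s\n", val);	/* prints 12 */

	nvlist_free(props);
	return (0);
}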
@@ -700,7 +788,7 @@ zpool_do_remove(int argc, char **argv)
 {
        char *poolname;
        int i, ret = 0;
-       zpool_handle_t *zhp;
+       zpool_handle_t *zhp = NULL;
 
        argc--;
        argv++;
@@ -724,12 +812,16 @@ zpool_do_remove(int argc, char **argv)
                if (zpool_vdev_remove(zhp, argv[i]) != 0)
                        ret = 1;
        }
+       zpool_close(zhp);
 
        return (ret);
 }
 
 /*
- * zpool labelclear <vdev>
+ * zpool labelclear [-f] <vdev>
+ *
+ *     -f      Force clearing the label for the vdevs which are members of
+ *             the exported or foreign pools.
  *
  * Verifies that the vdev is not active and zeros out the label information
  * on the device.
@@ -737,8 +829,11 @@ zpool_do_remove(int argc, char **argv)
 int
 zpool_do_labelclear(int argc, char **argv)
 {
-       char *vdev, *name;
+       char vdev[MAXPATHLEN];
+       char *name = NULL;
+       struct stat st;
        int c, fd = -1, ret = 0;
+       nvlist_t *config;
        pool_state_t state;
        boolean_t inuse = B_FALSE;
        boolean_t force = B_FALSE;
@@ -761,90 +856,113 @@ zpool_do_labelclear(int argc, char **argv)
 
        /* get vdev name */
        if (argc < 1) {
-               (void) fprintf(stderr, gettext("missing vdev device name\n"));
+               (void) fprintf(stderr, gettext("missing vdev name\n"));
                usage(B_FALSE);
        }
-
-       vdev = argv[0];
-       if ((fd = open(vdev, O_RDWR)) < 0) {
-               (void) fprintf(stderr, gettext("Unable to open %s\n"), vdev);
-               return (B_FALSE);
+       if (argc > 1) {
+               (void) fprintf(stderr, gettext("too many arguments\n"));
+               usage(B_FALSE);
        }
 
-       name = NULL;
-       if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0) {
-               if (force)
-                       goto wipe_label;
+       /*
+        * Check if we were given absolute path and use it as is.
+        * Otherwise if the provided vdev name doesn't point to a file,
+        * try prepending expected disk paths and partition numbers.
+        */
+       (void) strlcpy(vdev, argv[0], sizeof (vdev));
+       if (vdev[0] != '/' && stat(vdev, &st) != 0) {
+               int error;
+
+               error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
+               if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
+                       if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
+                               error = ENOENT;
+               }
 
-               (void) fprintf(stderr,
-                   gettext("Unable to determine pool state for %s\n"
-                   "Use -f to force the clearing any label data\n"), vdev);
+               if (error || (stat(vdev, &st) != 0)) {
+                       (void) fprintf(stderr, gettext(
+                           "failed to find device %s, try specifying absolute "
+                           "path instead\n"), argv[0]);
+                       return (1);
+               }
+       }
 
+       if ((fd = open(vdev, O_RDWR)) < 0) {
+               (void) fprintf(stderr, gettext("failed to open %s: %s\n"),
+                   vdev, strerror(errno));
                return (1);
        }
 
-       if (inuse) {
-               switch (state) {
-               default:
-               case POOL_STATE_ACTIVE:
-               case POOL_STATE_SPARE:
-               case POOL_STATE_L2CACHE:
-                       (void) fprintf(stderr,
-                           gettext("labelclear operation failed.\n"
-                           "\tVdev %s is a member (%s), of pool \"%s\".\n"
-                           "\tTo remove label information from this device, "
-                           "export or destroy\n\tthe pool, or remove %s from "
-                           "the configuration of this pool\n\tand retry the "
-                           "labelclear operation.\n"),
-                           vdev, zpool_pool_state_to_name(state), name, vdev);
-                       ret = 1;
-                       goto errout;
+       if (ioctl(fd, BLKFLSBUF) != 0)
+               (void) fprintf(stderr, gettext("failed to invalidate "
+                   "cache for %s: %s\n"), vdev, strerror(errno));
 
-               case POOL_STATE_EXPORTED:
-                       if (force)
-                               break;
+       if (zpool_read_label(fd, &config, NULL) != 0 || config == NULL) {
+               (void) fprintf(stderr,
+                   gettext("failed to check state for %s\n"), vdev);
+               ret = 1;
+               goto errout;
+       }
+       nvlist_free(config);
 
-                       (void) fprintf(stderr,
-                           gettext("labelclear operation failed.\n\tVdev "
-                           "%s is a member of the exported pool \"%s\".\n"
-                           "\tUse \"zpool labelclear -f %s\" to force the "
-                           "removal of label\n\tinformation.\n"),
-                           vdev, name, vdev);
-                       ret = 1;
-                       goto errout;
+       ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
+       if (ret != 0) {
+               (void) fprintf(stderr,
+                   gettext("failed to check state for %s\n"), vdev);
+               ret = 1;
+               goto errout;
+       }
 
-               case POOL_STATE_POTENTIALLY_ACTIVE:
-                       if (force)
-                               break;
+       if (!inuse)
+               goto wipe_label;
 
-                       (void) fprintf(stderr,
-                           gettext("labelclear operation failed.\n"
-                           "\tVdev %s is a member of the pool \"%s\".\n"
-                           "\tThis pool is unknown to this system, but may "
-                           "be active on\n\tanother system. Use "
-                           "\'zpool labelclear -f %s\' to force the\n"
-                           "\tremoval of label information.\n"),
-                           vdev, name, vdev);
-                       ret = 1;
-                       goto errout;
+       switch (state) {
+       default:
+       case POOL_STATE_ACTIVE:
+       case POOL_STATE_SPARE:
+       case POOL_STATE_L2CACHE:
+               (void) fprintf(stderr, gettext(
+                   "%s is a member (%s) of pool \"%s\"\n"),
+                   vdev, zpool_pool_state_to_name(state), name);
+               ret = 1;
+               goto errout;
 
-               case POOL_STATE_DESTROYED:
-                       /* inuse should never be set for a destroyed pool... */
+       case POOL_STATE_EXPORTED:
+               if (force)
                        break;
-               }
+               (void) fprintf(stderr, gettext(
+                   "use '-f' to override the following error:\n"
+                   "%s is a member of exported pool \"%s\"\n"),
+                   vdev, name);
+               ret = 1;
+               goto errout;
+
+       case POOL_STATE_POTENTIALLY_ACTIVE:
+               if (force)
+                       break;
+               (void) fprintf(stderr, gettext(
+                   "use '-f' to override the following error:\n"
+                   "%s is a member of potentially active pool \"%s\"\n"),
+                   vdev, name);
+               ret = 1;
+               goto errout;
+
+       case POOL_STATE_DESTROYED:
+               /* inuse should never be set for a destroyed pool */
+               assert(0);
+               break;
        }
 
 wipe_label:
-       if (zpool_clear_label(fd) != 0) {
+       ret = zpool_clear_label(fd);
+       if (ret != 0) {
                (void) fprintf(stderr,
-                   gettext("Label clear failed on vdev %s\n"), vdev);
-               ret = 1;
+                   gettext("failed to clear label for %s\n"), vdev);
        }
 
 errout:
-       close(fd);
-       if (name != NULL)
-               free(name);
+       free(name);
+       (void) close(fd);
 
        return (ret);
 }
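The name resolution added at the top of zpool_do_labelclear() accepts absolute paths verbatim and otherwise lets zfs_resolve_shortname() and zfs_append_partition() expand a short device name. A simplified standalone sketch of the same idea, assuming only a /dev/ prefix and no partition handling (so it is not the libzfs implementation):

#include <stdio.h>
#include <sys/stat.h>

static int
resolve_vdev(const char *arg, char *out, size_t len)
{
	struct stat st;

	/* Absolute paths, or names that already resolve, are used verbatim. */
	if (arg[0] == '/' || stat(arg, &st) == 0) {
		(void) snprintf(out, len, "%s", arg);
		return (0);
	}

	/* Otherwise try a well-known device directory. */
	(void) snprintf(out, len, "/dev/%s", arg);
	return (stat(out, &st) == 0 ? 0 : -1);
}

int
main(int argc, char **argv)
{
	char path[4096];

	if (argc > 1 && resolve_vdev(argv[1], path, sizeof (path)) == 0)
		(void) printf("%s\n", path);
	return (0);
}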
@@ -861,6 +979,7 @@ errout:
  *      -m     Set default mountpoint for the root dataset.  By default it's
  *             '/<pool>'
  *     -o      Set property=value.
+ *     -o      Set feature@feature=enabled|disabled.
  *     -d      Don't automatically enable all supported pool features
  *             (individual features can be enabled with -o).
  *     -O      Set fsproperty=value in the pool's root file system
@@ -1129,22 +1248,26 @@ zpool_do_create(int argc, char **argv)
                /*
                 * Hand off to libzfs.
                 */
-               if (enable_all_pool_feat) {
-                       spa_feature_t i;
-                       for (i = 0; i < SPA_FEATURES; i++) {
-                               char propname[MAXPATHLEN];
-                               zfeature_info_t *feat = &spa_feature_table[i];
-
-                               (void) snprintf(propname, sizeof (propname),
-                                   "feature@%s", feat->fi_uname);
+               spa_feature_t i;
+               for (i = 0; i < SPA_FEATURES; i++) {
+                       char propname[MAXPATHLEN];
+                       char *propval;
+                       zfeature_info_t *feat = &spa_feature_table[i];
 
-                               /*
-                                * Skip feature if user specified it manually
-                                * on the command line.
-                                */
-                               if (nvlist_exists(props, propname))
-                                       continue;
+                       (void) snprintf(propname, sizeof (propname),
+                           "feature@%s", feat->fi_uname);
 
+                       /*
+                        * Only features contained in props will be enabled:
+                        * remove from the nvlist every ZFS_FEATURE_DISABLED
+                        * value and add every missing ZFS_FEATURE_ENABLED if
+                        * enable_all_pool_feat is set.
+                        */
+                       if (!nvlist_lookup_string(props, propname, &propval)) {
+                               if (strcmp(propval, ZFS_FEATURE_DISABLED) == 0)
+                                       (void) nvlist_remove_all(props,
+                                           propname);
+                       } else if (enable_all_pool_feat) {
                                ret = add_prop_list(propname,
                                    ZFS_FEATURE_ENABLED, &props, B_TRUE);
                                if (ret != 0)
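The comment above describes a filter over the property nvlist. Here is a standalone libnvpair sketch of the same two branches, using made-up feature names: an explicit feature@x=disabled entry is dropped, and features left unmentioned are added as enabled when all pool features were requested:

#include <stdio.h>
#include <string.h>
#include <libnvpair.h>

int
main(void)
{
	const char *features[] = { "feature@alpha", "feature@beta", NULL };
	int enable_all = 1;
	nvlist_t *props;
	char *val;
	int i;

	(void) nvlist_alloc(&props, NV_UNIQUE_NAME, 0);
	(void) nvlist_add_string(props, "feature@alpha", "disabled");

	for (i = 0; features[i] != NULL; i++) {
		if (nvlist_lookup_string(props, features[i], &val) == 0) {
			/* Explicitly disabled features are removed. */
			if (strcmp(val, "disabled") == 0)
				(void) nvlist_remove_all(props, features[i]);
		} else if (enable_all) {
			/* Unmentioned features become enabled. */
			(void) nvlist_add_string(props, features[i],
			    "enabled");
		}
	}

	(void) printf("alpha present: %d\n",
	    nvlist_exists(props, "feature@alpha"));	/* 0 */
	(void) printf("beta present:  %d\n",
	    nvlist_exists(props, "feature@beta"));	/* 1 */
	nvlist_free(props);
	return (0);
}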
@@ -1238,6 +1361,7 @@ zpool_do_destroy(int argc, char **argv)
        if (zpool_disable_datasets(zhp, force) != 0) {
                (void) fprintf(stderr, gettext("could not destroy '%s': "
                    "could not unmount datasets\n"), zpool_get_name(zhp));
+               zpool_close(zhp);
                return (1);
        }
 
@@ -1357,7 +1481,7 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
        uint_t c, children;
        int ret;
 
-       name = zpool_vdev_name(g_zfs, zhp, nv, name_flags | VDEV_NAME_TYPE_ID);
+       name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
        if (strlen(name) + depth > max)
                max = strlen(name) + depth;
 
@@ -1435,12 +1559,90 @@ find_spare(zpool_handle_t *zhp, void *data)
        return (0);
 }
 
+typedef struct status_cbdata {
+       int             cb_count;
+       int             cb_name_flags;
+       int             cb_namewidth;
+       boolean_t       cb_allpools;
+       boolean_t       cb_verbose;
+       boolean_t       cb_explain;
+       boolean_t       cb_first;
+       boolean_t       cb_dedup_stats;
+       boolean_t       cb_print_status;
+       vdev_cmd_data_list_t    *vcdl;
+} status_cbdata_t;
+
+/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
+static int
+is_blank_str(char *str)
+{
+       while (str != NULL && *str != '\0') {
+               if (!isblank(*str))
+                       return (0);
+               str++;
+       }
+       return (1);
+}
+
+/* Print command output lines for specific vdev in a specific pool */
+static void
+zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
+{
+       vdev_cmd_data_t *data;
+       int i, j;
+       char *val;
+
+       for (i = 0; i < vcdl->count; i++) {
+               if ((strcmp(vcdl->data[i].path, path) != 0) ||
+                   (strcmp(vcdl->data[i].pool, pool) != 0)) {
+                       /* Not the vdev we're looking for */
+                       continue;
+               }
+
+               data = &vcdl->data[i];
+               /* Print out all the output values for this vdev */
+               for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
+                       val = NULL;
+                       /* Does this vdev have values for this column? */
+                       for (int k = 0; k < data->cols_cnt; k++) {
+                               if (strcmp(data->cols[k],
+                                   vcdl->uniq_cols[j]) == 0) {
+                                       /* yes it does, record the value */
+                                       val = data->lines[k];
+                                       break;
+                               }
+                       }
+                       /*
+                        * Mark empty values with dashes to make output
+                        * awk-able.
+                        */
+                       if (is_blank_str(val))
+                               val = "-";
+
+                       printf("%*s", vcdl->uniq_cols_width[j], val);
+                       if (j < vcdl->uniq_cols_cnt - 1)
+                               printf("  ");
+               }
+
+               /* Print out any values that aren't in a column at the end */
+               for (j = data->cols_cnt; j < data->lines_cnt; j++) {
+                       /* Did we have any columns?  If so print a spacer. */
+                       if (vcdl->uniq_cols_cnt > 0)
+                               printf("  ");
+
+                       val = data->lines[j];
+                       printf("%s", val ? val : "");
+               }
+               break;
+       }
+}
+
 /*
  * Print out configuration state as requested by status_callback.
  */
 static void
-print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
-    int namewidth, int depth, boolean_t isspare, int name_flags)
+print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
+    nvlist_t *nv, int depth, boolean_t isspare)
 {
        nvlist_t **child;
        uint_t c, children;
@@ -1449,8 +1651,9 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
        char rbuf[6], wbuf[6], cbuf[6];
        char *vname;
        uint64_t notpresent;
-       spare_cbdata_t cb;
+       spare_cbdata_t spare_cb;
        char *state;
+       char *path = NULL;
 
        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
@@ -1471,7 +1674,7 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
                        state = "AVAIL";
        }
 
-       (void) printf("\t%*s%-*s  %-8s", depth, "", namewidth - depth,
+       (void) printf("\t%*s%-*s  %-8s", depth, "", cb->cb_namewidth - depth,
            name, state);
 
        if (!isspare) {
@@ -1483,7 +1686,6 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
 
        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
            &notpresent) == 0) {
-               char *path;
                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
                (void) printf("  was %s", path);
        } else if (vs->vs_aux != 0) {
@@ -1512,17 +1714,17 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
 
                case VDEV_AUX_SPARED:
                        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
-                           &cb.cb_guid) == 0);
-                       if (zpool_iter(g_zfs, find_spare, &cb) == 1) {
-                               if (strcmp(zpool_get_name(cb.cb_zhp),
+                           &spare_cb.cb_guid) == 0);
+                       if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
+                               if (strcmp(zpool_get_name(spare_cb.cb_zhp),
                                    zpool_get_name(zhp)) == 0)
                                        (void) printf(gettext("currently in "
                                            "use"));
                                else
                                        (void) printf(gettext("in use by "
                                            "pool '%s'"),
-                                           zpool_get_name(cb.cb_zhp));
-                               zpool_close(cb.cb_zhp);
+                                           zpool_get_name(spare_cb.cb_zhp));
+                               zpool_close(spare_cb.cb_zhp);
                        } else {
                                (void) printf(gettext("currently in use"));
                        }
@@ -1548,6 +1750,10 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
                        (void) printf(gettext("split into new pool"));
                        break;
 
+               case VDEV_AUX_ACTIVE:
+                       (void) printf(gettext("currently in use"));
+                       break;
+
                default:
                        (void) printf(gettext("corrupted data"));
                        break;
@@ -1557,13 +1763,20 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
        (void) nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS,
            (uint64_t **)&ps, &c);
 
-       if (ps && ps->pss_state == DSS_SCANNING &&
+       if (ps != NULL && ps->pss_state == DSS_SCANNING &&
            vs->vs_scan_processed != 0 && children == 0) {
                (void) printf(gettext("  (%s)"),
                    (ps->pss_func == POOL_SCAN_RESILVER) ?
                    "resilvering" : "repairing");
        }
 
+       if (cb->vcdl != NULL) {
+               if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
+                       printf("  ");
+                       zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
+               }
+       }
+
        (void) printf("\n");
 
        for (c = 0; c < children; c++) {
@@ -1577,9 +1790,9 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
                if (islog || ishole)
                        continue;
                vname = zpool_vdev_name(g_zfs, zhp, child[c],
-                   name_flags | VDEV_NAME_TYPE_ID);
-               print_status_config(zhp, vname, child[c],
-                   namewidth, depth + 2, isspare, name_flags);
+                   cb->cb_name_flags | VDEV_NAME_TYPE_ID);
+               print_status_config(zhp, cb, vname, child[c], depth + 2,
+                   isspare);
                free(vname);
        }
 }
@@ -1589,8 +1802,8 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
  * pool, printing out the name and status for each one.
  */
 static void
-print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
-    int name_flags)
+print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
+    int depth)
 {
        nvlist_t **child;
        uint_t c, children;
@@ -1605,7 +1818,7 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
        verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
            (uint64_t **)&vs, &c) == 0);
 
-       (void) printf("\t%*s%-*s", depth, "", namewidth - depth, name);
+       (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
        (void) printf("  %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
 
        if (vs->vs_aux != 0) {
@@ -1636,6 +1849,10 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
                        (void) printf(gettext("too many errors"));
                        break;
 
+               case VDEV_AUX_ACTIVE:
+                       (void) printf(gettext("currently in use"));
+                       break;
+
                default:
                        (void) printf(gettext("corrupted data"));
                        break;
@@ -1656,9 +1873,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
                        continue;
 
                vname = zpool_vdev_name(g_zfs, NULL, child[c],
-                   name_flags | VDEV_NAME_TYPE_ID);
-               print_import_config(vname, child[c], namewidth, depth + 2,
-                   name_flags);
+                   cb->cb_name_flags | VDEV_NAME_TYPE_ID);
+               print_import_config(cb, vname, child[c], depth + 2);
                free(vname);
        }
 
@@ -1667,7 +1883,7 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
                (void) printf(gettext("\tcache\n"));
                for (c = 0; c < children; c++) {
                        vname = zpool_vdev_name(g_zfs, NULL, child[c],
-                           name_flags);
+                           cb->cb_name_flags);
                        (void) printf("\t  %s\n", vname);
                        free(vname);
                }
@@ -1678,7 +1894,7 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
                (void) printf(gettext("\tspares\n"));
                for (c = 0; c < children; c++) {
                        vname = zpool_vdev_name(g_zfs, NULL, child[c],
-                           name_flags);
+                           cb->cb_name_flags);
                        (void) printf("\t  %s\n", vname);
                        free(vname);
                }
@@ -1694,8 +1910,7 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
  * works because only the top level vdev is marked "is_log"
  */
 static void
-print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose,
-    int name_flags)
+print_logs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv)
 {
        uint_t c, children;
        nvlist_t **child;
@@ -1715,13 +1930,12 @@ print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose,
                if (!is_log)
                        continue;
                name = zpool_vdev_name(g_zfs, zhp, child[c],
-                   name_flags | VDEV_NAME_TYPE_ID);
-               if (verbose)
-                       print_status_config(zhp, name, child[c], namewidth,
-                           2, B_FALSE, name_flags);
+                   cb->cb_name_flags | VDEV_NAME_TYPE_ID);
+               if (cb->cb_print_status)
+                       print_status_config(zhp, cb, name, child[c], 2,
+                           B_FALSE);
                else
-                       print_import_config(name, child[c], namewidth, 2,
-                           name_flags);
+                       print_import_config(cb, name, child[c], 2);
                free(name);
        }
 }
@@ -1736,14 +1950,16 @@ show_import(nvlist_t *config)
        vdev_stat_t *vs;
        char *name;
        uint64_t guid;
+       uint64_t hostid = 0;
        char *msgid;
-       nvlist_t *nvroot;
+       char *hostname = "unknown";
+       nvlist_t *nvroot, *nvinfo;
        zpool_status_t reason;
        zpool_errata_t errata;
        const char *health;
        uint_t vsc;
-       int namewidth;
        char *comment;
+       status_cbdata_t cb = { 0 };
 
        verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
            &name) == 0);
@@ -1813,7 +2029,7 @@ show_import(nvlist_t *config)
 
        case ZPOOL_STATUS_UNSUP_FEAT_READ:
                (void) printf(gettext("status: The pool uses the following "
-                   "feature(s) not supported on this sytem:\n"));
+                   "feature(s) not supported on this system:\n"));
                zpool_print_unsup_feat(config);
                break;
 
@@ -1825,6 +2041,17 @@ show_import(nvlist_t *config)
                zpool_print_unsup_feat(config);
                break;
 
+       case ZPOOL_STATUS_HOSTID_ACTIVE:
+               (void) printf(gettext(" status: The pool is currently "
+                   "imported by another system.\n"));
+               break;
+
+       case ZPOOL_STATUS_HOSTID_REQUIRED:
+               (void) printf(gettext(" status: The pool has the "
+                   "multihost property on.  It cannot\n\tbe safely imported "
+                   "when the system hostid is not set.\n"));
+               break;
+
        case ZPOOL_STATUS_HOSTID_MISMATCH:
                (void) printf(gettext(" status: The pool was last accessed by "
                    "another system.\n"));
@@ -1938,6 +2165,27 @@ show_import(nvlist_t *config)
                            "imported. Attach the missing\n\tdevices and try "
                            "again.\n"));
                        break;
+               case ZPOOL_STATUS_HOSTID_ACTIVE:
+                       VERIFY0(nvlist_lookup_nvlist(config,
+                           ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
+
+                       if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
+                               hostname = fnvlist_lookup_string(nvinfo,
+                                   ZPOOL_CONFIG_MMP_HOSTNAME);
+
+                       if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
+                               hostid = fnvlist_lookup_uint64(nvinfo,
+                                   ZPOOL_CONFIG_MMP_HOSTID);
+
+                       (void) printf(gettext(" action: The pool must be "
+                           "exported from %s (hostid=%lx)\n\tbefore it "
+                           "can be safely imported.\n"), hostname,
+                           (unsigned long) hostid);
+                       break;
+               case ZPOOL_STATUS_HOSTID_REQUIRED:
+                       (void) printf(gettext(" action: Set a unique system "
+                           "hostid with the zgenhostid(8) command.\n"));
+                       break;
                default:
                        (void) printf(gettext(" action: The pool cannot be "
                            "imported due to damaged devices or data.\n"));
@@ -1970,13 +2218,14 @@ show_import(nvlist_t *config)
 
        (void) printf(gettext(" config:\n\n"));
 
-       namewidth = max_width(NULL, nvroot, 0, 0, 0);
-       if (namewidth < 10)
-               namewidth = 10;
+       cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
+           VDEV_NAME_TYPE_ID);
+       if (cb.cb_namewidth < 10)
+               cb.cb_namewidth = 10;
 
-       print_import_config(name, nvroot, namewidth, 0, 0);
+       print_import_config(&cb, name, nvroot, 0);
        if (num_logs(nvroot) > 0)
-               print_logs(NULL, nvroot, namewidth, B_FALSE, 0);
+               print_logs(NULL, &cb, nvroot);
 
        if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
                (void) printf(gettext("\n\tAdditional devices are known to "
@@ -1985,6 +2234,31 @@ show_import(nvlist_t *config)
        }
 }
 
+static boolean_t
+zfs_force_import_required(nvlist_t *config)
+{
+       uint64_t state;
+       uint64_t hostid = 0;
+       nvlist_t *nvinfo;
+
+       state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
+       (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
+
+       if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
+               return (B_TRUE);
+
+       nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
+       if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
+               mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
+                   ZPOOL_CONFIG_MMP_STATE);
+
+               if (mmp_state != MMP_STATE_INACTIVE)
+                       return (B_TRUE);
+       }
+
+       return (B_FALSE);
+}
+
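Reduced to its two conditions, the new zfs_force_import_required() check can be exercised in isolation. The sketch below uses simplified stand-in enums rather than the real pool_state_t/mmp_state_t values, purely to show when 'zpool import' starts demanding -f:

#include <stdio.h>

enum { POOL_EXPORTED, POOL_ACTIVE };			/* stand-in pool states */
enum { MMP_INACTIVE, MMP_ACTIVE, MMP_NO_HOSTID };	/* stand-in MMP states */

static int
force_required(int state, unsigned long pool_hostid,
    unsigned long sys_hostid, int mmp_state)
{
	/* Not cleanly exported by this host: force needed. */
	if (state != POOL_EXPORTED && pool_hostid != sys_hostid)
		return (1);
	/* Any MMP state other than inactive also requires force. */
	return (mmp_state != MMP_INACTIVE);
}

int
main(void)
{
	/* Exported pool, matching hostid, MMP inactive: no -f needed. */
	(void) printf("%d\n", force_required(POOL_EXPORTED, 1, 1,
	    MMP_INACTIVE));
	/* Pool left active under another hostid: -f required. */
	(void) printf("%d\n", force_required(POOL_ACTIVE, 2, 1,
	    MMP_INACTIVE));
	return (0);
}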
 /*
  * Perform the import for the given configuration.  This passes the heavy
  * lifting off to zpool_import_props(), and then mounts the datasets contained
@@ -1994,50 +2268,78 @@ static int
 do_import(nvlist_t *config, const char *newname, const char *mntopts,
     nvlist_t *props, int flags)
 {
+       int ret = 0;
        zpool_handle_t *zhp;
        char *name;
        uint64_t state;
        uint64_t version;
 
-       verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
-           &name) == 0);
+       name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
+       state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
+       version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
 
-       verify(nvlist_lookup_uint64(config,
-           ZPOOL_CONFIG_POOL_STATE, &state) == 0);
-       verify(nvlist_lookup_uint64(config,
-           ZPOOL_CONFIG_VERSION, &version) == 0);
        if (!SPA_VERSION_IS_SUPPORTED(version)) {
                (void) fprintf(stderr, gettext("cannot import '%s': pool "
                    "is formatted using an unsupported ZFS version\n"), name);
                return (1);
-       } else if (state != POOL_STATE_EXPORTED &&
+       } else if (zfs_force_import_required(config) &&
            !(flags & ZFS_IMPORT_ANY_HOST)) {
-               uint64_t hostid = 0;
-               unsigned long system_hostid = get_system_hostid();
+               mmp_state_t mmp_state = MMP_STATE_INACTIVE;
+               nvlist_t *nvinfo;
 
-               (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
-                   &hostid);
+               nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
+               if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
+                       mmp_state = fnvlist_lookup_uint64(nvinfo,
+                           ZPOOL_CONFIG_MMP_STATE);
 
-               if (hostid != 0 && (unsigned long)hostid != system_hostid) {
-                       char *hostname;
-                       uint64_t timestamp;
-                       time_t t;
+               if (mmp_state == MMP_STATE_ACTIVE) {
+                       char *hostname = "<unknown>";
+                       uint64_t hostid = 0;
 
-                       verify(nvlist_lookup_string(config,
-                           ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
-                       verify(nvlist_lookup_uint64(config,
-                           ZPOOL_CONFIG_TIMESTAMP, &timestamp) == 0);
-                       t = timestamp;
-                       (void) fprintf(stderr, gettext("cannot import "
-                           "'%s': pool may be in use from other "
-                           "system, it was last accessed by %s "
-                           "(hostid: 0x%lx) on %s"), name, hostname,
-                           (unsigned long)hostid,
-                           asctime(localtime(&t)));
-                       (void) fprintf(stderr, gettext("use '-f' to "
-                           "import anyway\n"));
-                       return (1);
+                       if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
+                               hostname = fnvlist_lookup_string(nvinfo,
+                                   ZPOOL_CONFIG_MMP_HOSTNAME);
+
+                       if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
+                               hostid = fnvlist_lookup_uint64(nvinfo,
+                                   ZPOOL_CONFIG_MMP_HOSTID);
+
+                       (void) fprintf(stderr, gettext("cannot import '%s': "
+                           "pool is imported on %s (hostid: "
+                           "0x%lx)\nExport the pool on the other system, "
+                           "then run 'zpool import'.\n"),
+                           name, hostname, (unsigned long) hostid);
+               } else if (mmp_state == MMP_STATE_NO_HOSTID) {
+                       (void) fprintf(stderr, gettext("Cannot import '%s': "
+                           "pool has the multihost property on and the\n"
+                           "system's hostid is not set. Set a unique hostid "
+                           "with the zgenhostid(8) command.\n"), name);
+               } else {
+                       char *hostname = "<unknown>";
+                       uint64_t timestamp = 0;
+                       uint64_t hostid = 0;
+
+                       if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
+                               hostname = fnvlist_lookup_string(config,
+                                   ZPOOL_CONFIG_HOSTNAME);
+
+                       if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
+                               timestamp = fnvlist_lookup_uint64(config,
+                                   ZPOOL_CONFIG_TIMESTAMP);
+
+                       if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
+                               hostid = fnvlist_lookup_uint64(config,
+                                   ZPOOL_CONFIG_HOSTID);
+
+                       (void) fprintf(stderr, gettext("cannot import '%s': "
+                           "pool was previously in use from another system.\n"
+                           "Last accessed by %s (hostid=%lx) at %s"
+                           "The pool can be imported, use 'zpool import -f' "
+                           "to import the pool.\n"), name, hostname,
+                           (unsigned long)hostid, ctime((time_t *)&timestamp));
                }
+
+               return (1);
        }
 
        if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
@@ -2049,6 +2351,16 @@ do_import(nvlist_t *config, const char *newname, const char *mntopts,
        if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
                return (1);
 
+       /*
+        * Loading keys is best effort. We don't want to return immediately
+        * if it fails but we do want to give the error to the caller.
+        */
+       if (flags & ZFS_IMPORT_LOAD_KEYS) {
+               ret = zfs_crypto_attempt_load_keys(g_zfs, name);
+               if (ret != 0)
+                       ret = 1;
+       }
+
        if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
            !(flags & ZFS_IMPORT_ONLY) &&
            zpool_enable_datasets(zhp, mntopts, 0) != 0) {
@@ -2057,14 +2369,14 @@ do_import(nvlist_t *config, const char *newname, const char *mntopts,
        }
 
        zpool_close(zhp);
-       return (0);
+       return (ret);
 }
 
 /*
  * zpool import [-d dir] [-D]
- *       import [-o mntopts] [-o prop=value] ... [-R root] [-D]
+ *       import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
  *              [-d dir | -c cachefile] [-f] -a
- *       import [-o mntopts] [-o prop=value] ... [-R root] [-D]
+ *       import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
  *              [-d dir | -c cachefile] [-f] [-n] [-F] <pool | id> [newpool]
  *
  *      -c     Read pool information from a cachefile instead of searching
@@ -2099,6 +2411,8 @@ do_import(nvlist_t *config, const char *newname, const char *mntopts,
  *
  *       -a    Import all pools found.
  *
+ *       -l    Load encryption keys while importing.
+ *
  *       -o    Set property=value and/or temporary mount options (without '=').
  *
  *      -s     Scan using the default search path, the libblkid cache will
@@ -2140,7 +2454,7 @@ zpool_do_import(int argc, char **argv)
        char *endptr;
 
        /* check options */
-       while ((c = getopt(argc, argv, ":aCc:d:DEfFmnNo:R:stT:VX")) != -1) {
+       while ((c = getopt(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX")) != -1) {
                switch (c) {
                case 'a':
                        do_all = B_TRUE;
@@ -2170,6 +2484,9 @@ zpool_do_import(int argc, char **argv)
                case 'F':
                        do_rewind = B_TRUE;
                        break;
+               case 'l':
+                       flags |= ZFS_IMPORT_LOAD_KEYS;
+                       break;
                case 'm':
                        flags |= ZFS_IMPORT_MISSING_LOG;
                        break;
@@ -2244,6 +2561,17 @@ zpool_do_import(int argc, char **argv)
                usage(B_FALSE);
        }
 
+       if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
+               (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
+               usage(B_FALSE);
+       }
+
+       if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
+               (void) fprintf(stderr, gettext("-l is only meaningful during "
+                   "an import\n"));
+               usage(B_FALSE);
+       }
+
        if ((dryrun || xtreme_rewind) && !do_rewind) {
                (void) fprintf(stderr,
                    gettext("-n or -X only meaningful with -F\n"));
@@ -2273,21 +2601,21 @@ zpool_do_import(int argc, char **argv)
                        (void) fprintf(stderr, gettext("too many arguments\n"));
                        usage(B_FALSE);
                }
+       }
 
-               /*
-                * Check for the SYS_CONFIG privilege.  We do this explicitly
-                * here because otherwise any attempt to discover pools will
-                * silently fail.
-                */
-               if (argc == 0 && !priv_ineffect(PRIV_SYS_CONFIG)) {
-                       (void) fprintf(stderr, gettext("cannot "
-                           "discover pools: permission denied\n"));
-                       if (searchdirs != NULL)
-                               free(searchdirs);
+       /*
+        * Check for the effective uid.  We do this explicitly here because
+        * otherwise any attempt to discover pools will silently fail.
+        */
+       if (argc == 0 && geteuid() != 0) {
+               (void) fprintf(stderr, gettext("cannot "
+                   "discover pools: permission denied\n"));
+               if (searchdirs != NULL)
+                       free(searchdirs);
 
-                       nvlist_free(policy);
-                       return (1);
-               }
+               nvlist_free(props);
+               nvlist_free(policy);
+               return (1);
        }
 
        /*
@@ -2352,15 +2680,7 @@ zpool_do_import(int argc, char **argv)
        idata.cachefile = cachefile;
        idata.scan = do_scan;
 
-       /*
-        * Under Linux the zpool_find_import_impl() function leverages the
-        * taskq implementation to parallelize device scanning.  It is
-        * therefore necessary to initialize this functionality for the
-        * duration of the zpool_search_import() function.
-        */
-       thread_init();
        pools = zpool_search_import(g_zfs, &idata);
-       thread_fini();
 
        if (pools != NULL && idata.exists &&
            (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
@@ -2392,6 +2712,8 @@ zpool_do_import(int argc, char **argv)
                if (envdup != NULL)
                        free(envdup);
                nvlist_free(policy);
+               nvlist_free(pools);
+               nvlist_free(props);
                return (1);
        }
 
@@ -2499,6 +2821,45 @@ error:
        return (err ? 1 : 0);
 }
 
+/*
+ * zpool sync [-f] [pool] ...
+ *
+ * -f (undocumented) force uberblock (and config including zpool cache file)
+ *    update.
+ *
+ * Sync the specified pool(s).
+ * Without arguments "zpool sync" will sync all pools.
+ * This command initiates TXG sync(s) and will return after the TXG(s) commit.
+ *
+ */
+static int
+zpool_do_sync(int argc, char **argv)
+{
+       int ret;
+       boolean_t force = B_FALSE;
+
+       /* check options */
+       while ((ret  = getopt(argc, argv, "f")) != -1) {
+               switch (ret) {
+               case 'f':
+                       force = B_TRUE;
+                       break;
+               case '?':
+                       (void) fprintf(stderr, gettext("invalid option '%c'\n"),
+                           optopt);
+                       usage(B_FALSE);
+               }
+       }
+
+       argc -= optind;
+       argv += optind;
+
+       /* if argc == 0 we will execute zpool_sync_one on all pools */
+       ret = for_each_pool(argc, argv, B_FALSE, NULL, zpool_sync_one, &force);
+
+       return (ret);
+}
+
 typedef struct iostat_cbdata {
        uint64_t cb_flags;
        int cb_name_flags;
@@ -2510,6 +2871,7 @@ typedef struct iostat_cbdata {
        boolean_t cb_literal;
        boolean_t cb_scripted;
        zpool_list_t *cb_list;
+       vdev_cmd_data_list_t *vcdl;
 } iostat_cbdata_t;
 
 /*  iostat labels */
@@ -2531,6 +2893,9 @@ static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
            {NULL}},
        [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2},
            {"sync_queue", 2}, {"async_queue", 2}, {NULL}},
+       [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
+           {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, {NULL}},
+
 };
 
 /* Shorthand - if "columns" field not set, default to 1 column */
@@ -2544,6 +2909,13 @@ static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
            {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
        [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
            {"write"}, {"read"}, {"write"}, {"scrub"}, {NULL}},
+       [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
+           {"ind"}, {"agg"}, {"ind"}, {"agg"}, {NULL}},
+};
+
+static const char *histo_to_title[] = {
+       [IOS_L_HISTO] = "latency",
+       [IOS_RQ_HISTO] = "req_size",
 };
 
 /*
@@ -2562,6 +2934,25 @@ label_array_len(const name_and_columns_t *labels)
        return (i);
 }
 
+/*
+ * Return the number of strings in a null-terminated string array.
+ * For example:
+ *
+ *     const char foo[] = {"bar", "baz", NULL}
+ *
+ * returns 2
+ */
+static uint64_t
+str_array_len(const char *array[])
+{
+       uint64_t i = 0;
+       while (array[i])
+               i++;
+
+       return (i);
+}
+
+
 /*
  * Return a default column width for default/latency/queue columns. This does
  * not include histograms, which have their columns autosized.
@@ -2624,7 +3015,7 @@ print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
                        rw_column_width = (column_width * columns) +
                            (2 * (columns - 1));
 
-                       text_start = (int) ((rw_column_width)/columns -
+                       text_start = (int)((rw_column_width)/columns -
                            slen/columns);
 
                        printf("  ");   /* Two spaces between columns */
@@ -2642,9 +3033,54 @@ print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
 
                }
        }
-       printf("\n");
 }
 
+
+/*
+ * print_cmd_columns - Print custom column titles from -c
+ *
+ * If the user specified the "zpool status|iostat -c" then print their custom
+ * column titles in the header.  For example, print_cmd_columns() would print
+ * the "  col1  col2" part of this:
+ *
+ * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
+ * ...
+ *           capacity     operations     bandwidth
+ * pool        alloc   free   read  write   read  write  col1  col2
+ * ----------  -----  -----  -----  -----  -----  -----  ----  ----
+ * mypool       269K  1008M      0      0    107    946
+ *   mirror     269K  1008M      0      0    107    946
+ *     sdb         -      -      0      0    102    473  val1  val2
+ *     sdc         -      -      0      0      5    473  val1  val2
+ * ----------  -----  -----  -----  -----  -----  -----  ----  ----
+ */
+void
+print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
+{
+       int i, j;
+       vdev_cmd_data_t *data = &vcdl->data[0];
+
+       if (vcdl->count == 0 || data == NULL)
+               return;
+
+       /*
+        * Each vdev cmd should have the same column names unless the user did
+        * something weird with their cmd.  Just take the column names from the
+        * first vdev and assume it works for all of them.
+        */
+       for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
+               printf("  ");
+               if (use_dashes) {
+                       for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
+                               printf("-");
+               } else {
+                       printf("%*s", vcdl->uniq_cols_width[i],
+                           vcdl->uniq_cols[i]);
+               }
+       }
+}
+
+
 /*
  * Utility function to print out a line of dashes like:
  *
@@ -2673,14 +3109,22 @@ print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
        uint64_t f;
        int idx;
        const name_and_columns_t *labels;
+       const char *title;
+
+
+       if (cb->cb_flags & IOS_ANYHISTO_M) {
+               title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
+       } else if (cb->cb_vdev_names_count) {
+               title = "vdev";
+       } else  {
+               title = "pool";
+       }
+
+       namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
+           name ? strlen(name) : 0);
 
-       if (cb->cb_flags & IOS_L_HISTO_M)
-               namewidth = MAX(cb->cb_namewidth, strlen("latency"));
-       else
-               namewidth = cb->cb_namewidth;
 
        if (name) {
-               namewidth = MAX(cb->cb_namewidth, strlen(name));
                printf("%-*s", namewidth, name);
        } else {
                for (i = 0; i < namewidth; i++)
@@ -2705,7 +3149,6 @@ print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
                                    "--------------------");
                }
        }
-       printf("\n");
 }
 
 
@@ -2727,26 +3170,42 @@ print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
     const char *histo_vdev_name)
 {
        unsigned int namewidth;
-       uint64_t flags = cb->cb_flags;
+       const char *title;
 
-       if (flags & IOS_L_HISTO_M)
-               namewidth = MAX(cb->cb_namewidth, strlen("latency"));
-       else
-               namewidth = cb->cb_namewidth;
+       if (cb->cb_flags & IOS_ANYHISTO_M) {
+               title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
+       } else if (cb->cb_vdev_names_count) {
+               title = "vdev";
+       } else  {
+               title = "pool";
+       }
+
+       namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
+           histo_vdev_name ? strlen(histo_vdev_name) : 0);
 
-       if (flags & IOS_L_HISTO_M)
+       if (histo_vdev_name)
                printf("%-*s", namewidth, histo_vdev_name);
        else
                printf("%*s", namewidth, "");
 
+
        print_iostat_labels(cb, force_column_width, iostat_top_labels);
+       printf("\n");
 
-       printf("%-*s", namewidth, flags & IOS_L_HISTO_M ? "latency" :
-           cb->cb_vdev_names_count ? "vdev" : "pool");
+       printf("%-*s", namewidth, title);
 
        print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
+       if (cb->vcdl != NULL)
+               print_cmd_columns(cb->vcdl, 0);
+
+       printf("\n");
 
        print_iostat_separator_impl(cb, force_column_width);
+
+       if (cb->vcdl != NULL)
+               print_cmd_columns(cb->vcdl, 1);
+
+       printf("\n");
 }
 
 static void
@@ -2809,7 +3268,8 @@ struct stat_array {
 };
 
 static uint64_t
-stat_histo_max(struct stat_array *nva, unsigned int len) {
+stat_histo_max(struct stat_array *nva, unsigned int len)
+{
        uint64_t max = 0;
        int i;
        for (i = 0; i < len; i++)
@@ -2825,7 +3285,8 @@ stat_histo_max(struct stat_array *nva, unsigned int len) {
  */
 static int
 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
-    struct stat_array *nva) {
+    struct stat_array *nva)
+{
        nvpair_t *tmp;
        int ret;
 
@@ -2918,6 +3379,7 @@ print_iostat_histo(struct stat_array *nva, unsigned int len,
        uint64_t val;
        enum zfs_nicenum_format format;
        unsigned int buckets;
+       unsigned int start_bucket;
 
        if (cb->cb_literal)
                format = ZFS_NICENUM_RAW;
@@ -2927,14 +3389,27 @@ print_iostat_histo(struct stat_array *nva, unsigned int len,
        /* All these histos are the same size, so just use nva[0].count */
        buckets = nva[0].count;
 
-       for (j = 0; j < buckets; j++) {
-               /* Ending range of this bucket */
-               val = (1UL << (j + 1)) - 1;
+       if (cb->cb_flags & IOS_RQ_HISTO_M) {
+               /* Start at 512 bytes; request sizes are never smaller */
+               start_bucket = 9;
+       } else {
+               start_bucket = 0;
+       }
 
+       for (j = start_bucket; j < buckets; j++) {
                /* Print histogram bucket label */
-               zfs_nicetime(val, buf, sizeof (buf));
+               if (cb->cb_flags & IOS_L_HISTO_M) {
+                       /* Ending range of this bucket */
+                       val = (1UL << (j + 1)) - 1;
+                       zfs_nicetime(val, buf, sizeof (buf));
+               } else {
+                       /* Request size (starting range of bucket) */
+                       val = (1UL << j);
+                       zfs_nicenum(val, buf, sizeof (buf));
+               }
+
                if (cb->cb_scripted)
-                       printf("%llu", (u_longlong_t) val);
+                       printf("%llu", (u_longlong_t)val);
                else
                        printf("%-*s", namewidth, buf);
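
The labels computed above follow directly from the power-of-two bucket layout: a latency bucket j is labelled with its upper bound, 2^(j+1) - 1 nanoseconds, while a request-size bucket j is labelled with its lower bound, 2^j bytes, starting at bucket 9 (512 bytes). A standalone sketch of that arithmetic, printing raw values where the real code feeds them through zfs_nicetime()/zfs_nicenum():

/* Illustrative sketch of the bucket-label arithmetic; not part of the patch. */
#include <stdio.h>

int
main(void)
{
	unsigned int j;

	/* Latency buckets: labelled with the ending range of the bucket. */
	for (j = 0; j < 4; j++)
		printf("latency bucket %u ends at %llu ns\n", j,
		    (unsigned long long)(1ULL << (j + 1)) - 1);

	/* Request-size buckets: labelled with the starting range, from 512. */
	for (j = 9; j < 13; j++)
		printf("request-size bucket %u starts at %llu bytes\n", j,
		    (unsigned long long)(1ULL << j));
	return (0);
}
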
 
@@ -2962,30 +3437,29 @@ print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
        unsigned int column_width;
        unsigned int namewidth;
        unsigned int entire_width;
-
-       const char *names[] = {
-               ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
-               ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
-       };
+       enum iostat_type type;
        struct stat_array *nva;
-       nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
+       const char **names;
+       unsigned int names_len;
+
+       /* What type of histo are we? */
+       type = IOS_HISTO_IDX(cb->cb_flags);
+
+       /* Get NULL-terminated array of nvlist names for our histo */
+       names = vsx_type_to_nvlist[type];
+       names_len = str_array_len(names); /* num of names */
+
+       nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
 
        if (cb->cb_literal) {
                column_width = MAX(5,
-                   (unsigned int) log10(stat_histo_max(nva,
-                   ARRAY_SIZE(names))) + 1);
+                   (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
        } else {
                column_width = 5;
        }
 
-       namewidth = MAX(cb->cb_namewidth, strlen("latency"));
+       namewidth = MAX(cb->cb_namewidth,
+           strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
 
        /*
         * Calculate the entire line width of what we're printing.  The
@@ -2998,17 +3472,17 @@ print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
        /*      |__________|  <--- entire_width         */
        /*                                              */
        entire_width = namewidth + (column_width + 2) *
-           label_array_len(iostat_bottom_labels[IOS_L_HISTO]);
+           label_array_len(iostat_bottom_labels[type]);
 
        if (cb->cb_scripted)
                printf("%s\n", name);
        else
                print_iostat_header_impl(cb, column_width, name);
 
-       print_iostat_histo(nva, ARRAY_SIZE(names), cb, column_width,
+       print_iostat_histo(nva, names_len, cb, column_width,
            namewidth, scale);
 
-       free_calc_stats(nva, ARRAY_SIZE(names));
+       free_calc_stats(nva, names_len);
        if (!cb->cb_scripted)
                print_solid_separator(entire_width);
 }
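
As a worked example of the width calculation above (illustrative numbers, not taken from the patch): with a name column of 10 characters, column_width = 5 and nine histogram data columns, entire_width = 10 + (5 + 2) * 9 = 73 characters, which is the width print_solid_separator() is then asked to draw.
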
@@ -3107,7 +3581,7 @@ print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
        nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
 
        if (cb->cb_literal)
-               format = ZFS_NICENUM_RAW;
+               format = ZFS_NICENUM_RAWTIME;
        else
                format = ZFS_NICENUM_TIME;
 
@@ -3219,7 +3693,7 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
         * Print the vdev name unless it's is a histogram.  Histograms
         * display the vdev name in the header itself.
         */
-       if (!(cb->cb_flags & IOS_L_HISTO_M)) {
+       if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
                if (cb->cb_scripted) {
                        printf("%s", name);
                } else {
@@ -3234,7 +3708,7 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
 
        /* Calculate our scaling factor */
        tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
-       if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_L_HISTO_M)) {
+       if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
                /*
                 * If we specify printing histograms with no time interval, then
                 * print the histogram numbers over the entire lifetime of the
@@ -3256,18 +3730,29 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
                print_iostat_latency(cb, oldnv, newnv, scale);
        if (cb->cb_flags & IOS_QUEUES_M)
                print_iostat_queues(cb, oldnv, newnv, scale);
-       if (cb->cb_flags & IOS_L_HISTO_M) {
+       if (cb->cb_flags & IOS_ANYHISTO_M) {
                printf("\n");
                print_iostat_histos(cb, oldnv, newnv, scale, name);
        }
 
-       if (!(cb->cb_flags & IOS_L_HISTO_M))
+       if (cb->vcdl != NULL) {
+               char *path;
+               if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
+                   &path) == 0) {
+                       printf("  ");
+                       zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
+               }
+       }
+
+       if (!(cb->cb_flags & IOS_ANYHISTO_M))
                printf("\n");
 
-       free(calcvs);
        ret++;
 
 children:
+
+       free(calcvs);
+
        if (!cb->cb_verbose)
                return (ret);
 
@@ -3303,10 +3788,11 @@ children:
         */
 
        if (num_logs(newnv) > 0) {
-               if ((!(cb->cb_flags & IOS_L_HISTO_M)) && !cb->cb_scripted &&
+               if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
                    !cb->cb_vdev_names) {
                        print_iostat_dashes(cb, 0, "logs");
                }
+               printf("\n");
 
                for (c = 0; c < children; c++) {
                        uint64_t islog = B_FALSE;
@@ -3337,10 +3823,11 @@ children:
                return (ret);
 
        if (children > 0) {
-               if ((!(cb->cb_flags & IOS_L_HISTO_M)) && !cb->cb_scripted &&
+               if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
                    !cb->cb_vdev_names) {
                        print_iostat_dashes(cb, 0, "cache");
                }
+               printf("\n");
 
                for (c = 0; c < children; c++) {
                        vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
@@ -3398,10 +3885,15 @@ print_iostat(zpool_handle_t *zhp, void *data)
                    &oldnvroot) == 0);
 
        ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
-                                                                       cb, 0);
-       if ((ret != 0) && !(cb->cb_flags & IOS_L_HISTO_M) && !cb->cb_scripted &&
-           cb->cb_verbose && !cb->cb_vdev_names_count)
-                               print_iostat_separator(cb);
+           cb, 0);
+       if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
+           !cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) {
+               print_iostat_separator(cb);
+               if (cb->vcdl != NULL) {
+                       print_cmd_columns(cb->vcdl, 1);
+               }
+               printf("\n");
+       }
 
        return (ret);
 }
@@ -3436,7 +3928,7 @@ get_namewidth(zpool_handle_t *zhp, void *data)
                    &nvroot) == 0);
                unsigned int poolname_len = strlen(zpool_get_name(zhp));
                if (!cb->cb_verbose)
-                       cb->cb_namewidth = poolname_len;
+                       cb->cb_namewidth = MAX(poolname_len, cb->cb_namewidth);
                else
                        cb->cb_namewidth = MAX(poolname_len,
                            max_width(zhp, nvroot, 0, cb->cb_namewidth,
@@ -3553,37 +4045,6 @@ get_stat_flags_cb(zpool_handle_t *zhp, void *data)
        uint64_t flags = 0;
        int i, j;
 
-       /*
-        * Lookup table for extended iostat flags to nvlist names.
-        * Basically a list of all the nvpairs a flag requires.
-        */
-       static const char *vsx_type_to_nvlist[IOS_COUNT][10] = {
-               [IOS_L_HISTO] = {
-                   ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
-                   NULL},
-               [IOS_LATENCY] = {
-                   ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
-                   ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
-                   NULL},
-               [IOS_QUEUES] = {
-                   ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
-                   ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
-                   ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
-                   ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
-                   ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
-                   NULL}
-       };
-
        config = zpool_get_config(zhp, NULL);
        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
@@ -3646,14 +4107,16 @@ static int
 is_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_data)
 {
        iostat_cbdata_t *cb = cb_data;
-       char *name;
+       char *name = NULL;
+       int ret = 0;
 
        name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
 
        if (strcmp(name, cb->cb_vdev_names[0]) == 0)
-               return (1); /* match */
+               ret = 1; /* match */
+       free(name);
 
-       return (0);
+       return (ret);
 }
 
 /*
@@ -3728,7 +4191,8 @@ is_pool(char *name)
 
 /* Are all our argv[] strings pool names?  If so return 1, 0 otherwise. */
 static int
-are_all_pools(int argc, char **argv) {
+are_all_pools(int argc, char **argv)
+{
        if ((argc == 0) || !*argv)
                return (0);
 
@@ -3809,19 +4273,101 @@ get_interval_count_filter_guids(int *argc, char **argv, float *interval,
  * seconds.
  */
 static void
-fsleep(float sec) {
+fsleep(float sec)
+{
        struct timespec req;
        req.tv_sec = floor(sec);
        req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
        nanosleep(&req, NULL);
 }
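
fsleep() simply splits the fractional seconds across the two timespec fields. A standalone sketch of the same conversion with a hypothetical 2.5-second interval (NANOSEC written out as 1000000000 here):

/* Illustrative sketch of the float-seconds to struct timespec split. */
#include <stdio.h>
#include <math.h>
#include <time.h>

int
main(void)
{
	float sec = 2.5;		/* hypothetical interval */
	struct timespec req;

	req.tv_sec = floor(sec);
	req.tv_nsec = (sec - (float)req.tv_sec) * 1000000000L;
	printf("tv_sec=%lld tv_nsec=%ld\n",
	    (long long)req.tv_sec, req.tv_nsec);
	/* nanosleep(&req, NULL) would then wait roughly 2.5 seconds. */
	return (0);
}
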
 
+/*
+ * Run one of the zpool status/iostat -c scripts with the help (-h) option and
+ * print the result.
+ *
+ * name:       Short name of the script ('iostat').
+ * path:       Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
+ */
+static void
+print_zpool_script_help(char *name, char *path)
+{
+       char *argv[] = {path, "-h", NULL};
+       char **lines = NULL;
+       int lines_cnt = 0;
+       int rc;
+
+       rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
+           &lines_cnt);
+       if (rc != 0 || lines == NULL || lines_cnt <= 0) {
+               if (lines != NULL)
+                       libzfs_free_str_array(lines, lines_cnt);
+               return;
+       }
+
+       for (int i = 0; i < lines_cnt; i++)
+               if (!is_blank_str(lines[i]))
+                       printf("  %-14s  %s\n", name, lines[i]);
+
+       libzfs_free_str_array(lines, lines_cnt);
+}
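
libzfs_run_process_get_stdout_nopath() is a libzfs helper that runs the script and hands back its stdout as an array of lines. The general technique can be sketched portably with popen(); this is an illustration only, not how libzfs implements it, and the script path is hypothetical:

/* Illustrative sketch: capture "<script> -h" output line by line via popen(). */
#include <stdio.h>
#include <string.h>

static void
print_script_help_sketch(const char *name, const char *path)
{
	char cmd[1024], line[512];
	FILE *fp;

	(void) snprintf(cmd, sizeof (cmd), "%s -h", path);
	if ((fp = popen(cmd, "r")) == NULL)
		return;
	while (fgets(line, sizeof (line), fp) != NULL) {
		line[strcspn(line, "\n")] = '\0';
		if (line[0] != '\0')
			printf("  %-14s  %s\n", name, line);
	}
	(void) pclose(fp);
}

int
main(void)
{
	/* Hypothetical script location, for illustration only. */
	print_script_help_sketch("iostat", "/usr/local/etc/zfs/zpool.d/iostat");
	return (0);
}
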
+
+/*
+ * Go through the zpool status/iostat -c scripts in the user's path, run their
+ * help option (-h), and print out the results.
+ */
+static void
+print_zpool_dir_scripts(char *dirpath)
+{
+       DIR *dir;
+       struct dirent *ent;
+       char fullpath[MAXPATHLEN];
+       struct stat dir_stat;
+
+       if ((dir = opendir(dirpath)) != NULL) {
+               /* Print help for each executable script in the directory */
+               while ((ent = readdir(dir)) != NULL) {
+                       snprintf(fullpath, sizeof (fullpath), "%s/%s",
+                           dirpath, ent->d_name);
+
+                       /* Print the scripts */
+                       if (stat(fullpath, &dir_stat) == 0)
+                               if (dir_stat.st_mode & S_IXUSR &&
+                                   S_ISREG(dir_stat.st_mode))
+                                       print_zpool_script_help(ent->d_name,
+                                           fullpath);
+               }
+               closedir(dir);
+       }
+}
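
The filter above is ordinary stat(2) bit testing: keep entries that are regular files with the owner-execute bit set. A self-contained sketch of the same check (the directory path is hypothetical):

/* Illustrative sketch: list regular, user-executable files in a directory. */
#include <stdio.h>
#include <dirent.h>
#include <sys/stat.h>

int
main(void)
{
	const char *dirpath = "/usr/local/etc/zfs/zpool.d";	/* hypothetical */
	char fullpath[4096];
	struct dirent *ent;
	struct stat st;
	DIR *dir;

	if ((dir = opendir(dirpath)) == NULL)
		return (1);
	while ((ent = readdir(dir)) != NULL) {
		(void) snprintf(fullpath, sizeof (fullpath), "%s/%s",
		    dirpath, ent->d_name);
		if (stat(fullpath, &st) == 0 && S_ISREG(st.st_mode) &&
		    (st.st_mode & S_IXUSR))
			printf("executable script: %s\n", ent->d_name);
	}
	(void) closedir(dir);
	return (0);
}
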
+
+/*
+ * Print out help text for all zpool status/iostat -c scripts.
+ */
+static void
+print_zpool_script_list(char *subcommand)
+{
+       char *dir, *sp;
+
+       printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
+
+       sp = zpool_get_cmd_search_path();
+       if (sp == NULL)
+               return;
+
+       dir = strtok(sp, ":");
+       while (dir != NULL) {
+               print_zpool_dir_scripts(dir);
+               dir = strtok(NULL, ":");
+       }
+
+       free(sp);
+}
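
strtok() writes NUL bytes into the buffer it walks, which is why the code tokenizes the copy returned by zpool_get_cmd_search_path() and frees it afterwards. A standalone sketch of walking a colon-separated search path (the path value is made up):

/* Illustrative sketch: iterate over a PATH-style, colon-separated string. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	/* Hypothetical search path; strtok() needs a writable copy. */
	char *sp = strdup("/etc/zfs/zpool.d:/usr/local/etc/zfs/zpool.d");
	char *dir;

	if (sp == NULL)
		return (1);
	for (dir = strtok(sp, ":"); dir != NULL; dir = strtok(NULL, ":"))
		printf("would scan: %s\n", dir);
	free(sp);
	return (0);
}
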
 
 /*
- * zpool iostat [-ghHLpPvy] [[-lq]-w] [-n name] [-T d|u]
- *             [[ pool ...]|[pool vdev ...]|[vdev ...]]
- *             [interval [count]]
+ * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name]
+ *              [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
+ *              [interval [count]]
  *
+ *     -c CMD  For each vdev, run command CMD
  *     -g      Display guid for individual vdev name.
  *     -L      Follow links when resolving vdev path name.
  *     -P      Display full path for vdev name.
@@ -3832,7 +4378,8 @@ fsleep(float sec) {
  *             by a single tab.
  *     -l      Display average latency
  *     -q      Display queue depths
- *     -w      Display histograms
+ *     -w      Display latency histograms
+ *     -r      Display request size histograms
  *     -T      Display a timestamp in date(1) or Unix format
  *
  * This command can be tricky because we want to be able to deal with pool
@@ -3851,23 +4398,49 @@ zpool_do_iostat(int argc, char **argv)
        unsigned long count = 0;
        zpool_list_t *list;
        boolean_t verbose = B_FALSE;
-       boolean_t latency = B_FALSE, histo = B_FALSE;
-       boolean_t queues = B_FALSE, parseable = B_FALSE, scripted = B_FALSE;
+       boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
+       boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
        boolean_t omit_since_boot = B_FALSE;
        boolean_t guid = B_FALSE;
        boolean_t follow_links = B_FALSE;
        boolean_t full_name = B_FALSE;
        iostat_cbdata_t cb = { 0 };
+       char *cmd = NULL;
 
        /* Used for printing error message */
        const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
-           [IOS_L_HISTO] = 'w'};
+           [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
 
        uint64_t unsupported_flags;
 
        /* check options */
-       while ((c = getopt(argc, argv, "gLPT:vyhplqwH")) != -1) {
+       while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwH")) != -1) {
                switch (c) {
+               case 'c':
+                       if (cmd != NULL) {
+                               fprintf(stderr,
+                                   gettext("Can't set -c flag twice\n"));
+                               exit(1);
+                       }
+
+                       if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
+                           !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
+                               fprintf(stderr, gettext(
+                                   "Can't run -c, disabled by "
+                                   "ZPOOL_SCRIPTS_ENABLED.\n"));
+                               exit(1);
+                       }
+
+                       if ((getuid() <= 0 || geteuid() <= 0) &&
+                           !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
+                               fprintf(stderr, gettext(
+                                   "Can't run -c with root privileges "
+                                   "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
+                               exit(1);
+                       }
+                       cmd = optarg;
+                       verbose = B_TRUE;
+                       break;
                case 'g':
                        guid = B_TRUE;
                        break;
@@ -3884,7 +4457,7 @@ zpool_do_iostat(int argc, char **argv)
                        verbose = B_TRUE;
                        break;
                case 'p':
-                       parseable = B_TRUE;
+                       parsable = B_TRUE;
                        break;
                case 'l':
                        latency = B_TRUE;
@@ -3896,7 +4469,10 @@ zpool_do_iostat(int argc, char **argv)
                        scripted = B_TRUE;
                        break;
                case 'w':
-                       histo = B_TRUE;
+                       l_histo = B_TRUE;
+                       break;
+               case 'r':
+                       rq_histo = B_TRUE;
                        break;
                case 'y':
                        omit_since_boot = B_TRUE;
@@ -3905,8 +4481,13 @@ zpool_do_iostat(int argc, char **argv)
                        usage(B_FALSE);
                        break;
                case '?':
-                       (void) fprintf(stderr, gettext("invalid option '%c'\n"),
-                           optopt);
+                       if (optopt == 'c') {
+                               print_zpool_script_list("iostat");
+                               exit(0);
+                       } else {
+                               fprintf(stderr,
+                                   gettext("invalid option '%c'\n"), optopt);
+                       }
                        usage(B_FALSE);
                }
        }
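
The '?' branch in the option loop above relies on standard getopt() behaviour: when an option that requires an argument (here -c) is supplied without one, getopt() returns '?' and records the offending letter in optopt, which is how a bare -c turns into a listing of the available scripts rather than a generic error. A minimal standalone sketch of that pattern (hypothetical option set):

/* Illustrative sketch: detect a missing argument for -c via optopt. */
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	int c;

	opterr = 0;	/* suppress getopt()'s own diagnostics */
	while ((c = getopt(argc, argv, "c:v")) != -1) {
		switch (c) {
		case 'c':
			printf("run script(s): %s\n", optarg);
			break;
		case 'v':
			printf("verbose\n");
			break;
		case '?':
			if (optopt == 'c')
				printf("-c with no argument: would list "
				    "available scripts\n");
			else
				fprintf(stderr, "invalid option '%c'\n",
				    optopt);
			return (1);
		}
	}
	return (0);
}
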
@@ -3914,7 +4495,7 @@ zpool_do_iostat(int argc, char **argv)
        argc -= optind;
        argv += optind;
 
-       cb.cb_literal = parseable;
+       cb.cb_literal = parsable;
        cb.cb_scripted = scripted;
 
        if (guid)
@@ -3997,10 +4578,18 @@ zpool_do_iostat(int argc, char **argv)
                return (1);
        }
 
-       if (histo && (queues || latency)) {
+       if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
+               pool_list_free(list);
+               (void) fprintf(stderr,
+                   gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
+               usage(B_FALSE);
+               return (1);
+       }
+
+       if (l_histo && rq_histo) {
                pool_list_free(list);
                (void) fprintf(stderr,
-                   gettext("-w isn't allowed with [-q|-l]\n"));
+                   gettext("Only one of [-r|-w] can be passed at a time\n"));
                usage(B_FALSE);
                return (1);
        }
@@ -4010,13 +4599,15 @@ zpool_do_iostat(int argc, char **argv)
         */
        cb.cb_list = list;
 
-       if (histo) {
+       if (l_histo) {
                /*
                 * Histograms tables look out of place when you try to display
                 * them with the other stats, so make a rule that you can only
                 * print histograms by themselves.
                 */
                cb.cb_flags = IOS_L_HISTO_M;
+       } else if (rq_histo) {
+               cb.cb_flags = IOS_RQ_HISTO_M;
        } else {
                cb.cb_flags = IOS_DEFAULT_M;
                if (latency)
@@ -4041,13 +4632,12 @@ zpool_do_iostat(int argc, char **argv)
                        fprintf(stderr, " -%c", flag_to_arg[idx]);
                }
 
-               fprintf(stderr, ".  Try running a newer module.\n"),
+               fprintf(stderr, ".  Try running a newer module.\n");
                pool_list_free(list);
 
                return (1);
        }
 
-
        for (;;) {
                if ((npools = pool_list_count(list)) == 0)
                        (void) fprintf(stderr, gettext("no pools available\n"));
@@ -4079,6 +4669,15 @@ zpool_do_iostat(int argc, char **argv)
                        if (timestamp_fmt != NODATE)
                                print_timestamp(timestamp_fmt);
 
+                       if (cmd != NULL && cb.cb_verbose &&
+                           !(cb.cb_flags & IOS_ANYHISTO_M)) {
+                               cb.vcdl = all_pools_for_each_vdev_run(argc,
+                                   argv, cmd, g_zfs, cb.cb_vdev_names,
+                                   cb.cb_vdev_names_count, cb.cb_name_flags);
+                       } else {
+                               cb.vcdl = NULL;
+                       }
+
                        /*
                         * If it's the first time and we're not skipping it,
                         * or either skip or verbose mode, print the header.
@@ -4088,7 +4687,7 @@ zpool_do_iostat(int argc, char **argv)
                         */
                        if (((++cb.cb_iteration == 1 && !skip) ||
                            (skip != verbose)) &&
-                           (!(cb.cb_flags & IOS_L_HISTO_M)) &&
+                           (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
                            !cb.cb_scripted)
                                print_iostat_header(&cb);
 
@@ -4097,6 +4696,7 @@ zpool_do_iostat(int argc, char **argv)
                                continue;
                        }
 
+
                        pool_list_iter(list, B_FALSE, print_iostat, &cb);
 
                        /*
@@ -4108,12 +4708,19 @@ zpool_do_iostat(int argc, char **argv)
                         * we also want an ending separator.
                         */
                        if (((npools > 1 && !verbose &&
-                           !(cb.cb_flags & IOS_L_HISTO_M)) ||
-                           (!(cb.cb_flags & IOS_L_HISTO_M) &&
+                           !(cb.cb_flags & IOS_ANYHISTO_M)) ||
+                           (!(cb.cb_flags & IOS_ANYHISTO_M) &&
                            cb.cb_vdev_names_count)) &&
                            !cb.cb_scripted) {
                                print_iostat_separator(&cb);
+                               if (cb.vcdl != NULL)
+                                       print_cmd_columns(cb.vcdl, 1);
+                               printf("\n");
                        }
+
+                       if (cb.vcdl != NULL)
+                               free_vdev_cmd_data_list(cb.vcdl);
+
                }
 
                /*
@@ -4268,7 +4875,7 @@ print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
 
 static void
 print_one_column(zpool_prop_t prop, uint64_t value, boolean_t scripted,
-    boolean_t valid)
+    boolean_t valid, enum zfs_nicenum_format format)
 {
        char propval[64];
        boolean_t fixed;
@@ -4279,22 +4886,30 @@ print_one_column(zpool_prop_t prop, uint64_t value, boolean_t scripted,
                if (value == 0)
                        (void) strlcpy(propval, "-", sizeof (propval));
                else
-                       zfs_nicenum(value, propval, sizeof (propval));
+                       zfs_nicenum_format(value, propval, sizeof (propval),
+                           format);
                break;
        case ZPOOL_PROP_FRAGMENTATION:
                if (value == ZFS_FRAG_INVALID) {
                        (void) strlcpy(propval, "-", sizeof (propval));
+               } else if (format == ZFS_NICENUM_RAW) {
+                       (void) snprintf(propval, sizeof (propval), "%llu",
+                           (unsigned long long)value);
                } else {
                        (void) snprintf(propval, sizeof (propval), "%llu%%",
                            (unsigned long long)value);
                }
                break;
        case ZPOOL_PROP_CAPACITY:
-               (void) snprintf(propval, sizeof (propval), "%llu%%",
-                   (unsigned long long)value);
+               if (format == ZFS_NICENUM_RAW)
+                       (void) snprintf(propval, sizeof (propval), "%llu",
+                           (unsigned long long)value);
+               else
+                       (void) snprintf(propval, sizeof (propval), "%llu%%",
+                           (unsigned long long)value);
                break;
        default:
-               zfs_nicenum(value, propval, sizeof (propval));
+               zfs_nicenum_format(value, propval, sizeof (propval), format);
        }
 
        if (!valid)
@@ -4325,6 +4940,12 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
        if (name != NULL) {
                boolean_t toplevel = (vs->vs_space != 0);
                uint64_t cap;
+               enum zfs_nicenum_format format;
+
+               if (cb->cb_literal)
+                       format = ZFS_NICENUM_RAW;
+               else
+                       format = ZFS_NICENUM_1024;
 
                if (scripted)
                        (void) printf("\t%s", name);
@@ -4341,19 +4962,21 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
                 * to indicate that the value is valid.
                 */
                print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, scripted,
-                   toplevel);
+                   toplevel, format);
                print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, scripted,
-                   toplevel);
+                   toplevel, format);
                print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
-                   scripted, toplevel);
+                   scripted, toplevel, format);
                print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, scripted,
-                   B_TRUE);
+                   B_TRUE, format);
                print_one_column(ZPOOL_PROP_FRAGMENTATION,
                    vs->vs_fragmentation, scripted,
-                   (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel));
+                   (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
+                   format);
                cap = (vs->vs_space == 0) ? 0 :
                    (vs->vs_alloc * 100 / vs->vs_space);
-               print_one_column(ZPOOL_PROP_CAPACITY, cap, scripted, toplevel);
+               print_one_column(ZPOOL_PROP_CAPACITY, cap, scripted, toplevel,
+                   format);
                (void) printf("\n");
        }
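
The format argument threaded through print_one_column() switches between raw numbers (zpool list -p) and humanized output. A sketch of the general 1024-based humanizing technique, as an illustration rather than a copy of libzfs's zfs_nicenum_format():

/* Illustrative sketch: raw vs. 1024-based humanized number formatting. */
#include <stdio.h>

static void
humanize_1024(unsigned long long v, char *buf, size_t len)
{
	const char *suffix[] = { "", "K", "M", "G", "T", "P", "E" };
	int i = 0;
	double d = (double)v;

	while (d >= 1024 && i < 6) {
		d /= 1024;
		i++;
	}
	(void) snprintf(buf, len, "%.2f%s", d, suffix[i]);
}

int
main(void)
{
	unsigned long long alloc = 123456789012ULL;	/* hypothetical bytes */
	char buf[32];

	printf("raw:       %llu\n", alloc);	/* what -p would print */
	humanize_1024(alloc, buf, sizeof (buf));
	printf("humanized: %s\n", buf);		/* default output style */
	return (0);
}
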
 
@@ -4636,25 +5259,44 @@ zpool_do_attach_or_replace(int argc, char **argv, int replacing)
                usage(B_FALSE);
        }
 
-       if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
+       if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
+               nvlist_free(props);
                return (1);
+       }
 
        if (zpool_get_config(zhp, NULL) == NULL) {
                (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
                    poolname);
                zpool_close(zhp);
+               nvlist_free(props);
                return (1);
        }
 
+       /* unless manually specified use "ashift" pool property (if set) */
+       if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
+               int intval;
+               zprop_source_t src;
+               char strval[ZPOOL_MAXPROPLEN];
+
+               intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
+               if (src != ZPROP_SRC_DEFAULT) {
+                       (void) sprintf(strval, "%" PRId32, intval);
+                       verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
+                           &props, B_TRUE) == 0);
+               }
+       }
+
        nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
            argc, argv);
        if (nvroot == NULL) {
                zpool_close(zhp);
+               nvlist_free(props);
                return (1);
        }
 
        ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing);
 
+       nvlist_free(props);
        nvlist_free(nvroot);
        zpool_close(zhp);
 
@@ -4762,6 +5404,7 @@ zpool_do_detach(int argc, char **argv)
  *     -o      Set property=value, or set mount options.
  *     -P      Display full path for vdev name.
  *     -R      Mount the split-off pool under an alternate root.
+ *     -l      Load encryption keys while importing.
  *
  * Splits the named pool and gives it the new pool name.  Devices to be split
  * off may be listed, provided that no more than one device is specified
@@ -4779,6 +5422,7 @@ zpool_do_split(int argc, char **argv)
        char *mntopts = NULL;
        splitflags_t flags;
        int c, ret = 0;
+       boolean_t loadkeys = B_FALSE;
        zpool_handle_t *zhp;
        nvlist_t *config, *props = NULL;
 
@@ -4787,7 +5431,7 @@ zpool_do_split(int argc, char **argv)
        flags.name_flags = 0;
 
        /* check options */
-       while ((c = getopt(argc, argv, ":gLR:no:P")) != -1) {
+       while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
                switch (c) {
                case 'g':
                        flags.name_flags |= VDEV_NAME_GUID;
@@ -4804,6 +5448,9 @@ zpool_do_split(int argc, char **argv)
                                usage(B_FALSE);
                        }
                        break;
+               case 'l':
+                       loadkeys = B_TRUE;
+                       break;
                case 'n':
                        flags.dryrun = B_TRUE;
                        break;
@@ -4842,6 +5489,12 @@ zpool_do_split(int argc, char **argv)
                usage(B_FALSE);
        }
 
+       if (!flags.import && loadkeys) {
+               (void) fprintf(stderr, gettext("loading keys is only "
+                   "valid when importing the pool\n"));
+               usage(B_FALSE);
+       }
+
        argc -= optind;
        argv += optind;
 
@@ -4860,8 +5513,10 @@ zpool_do_split(int argc, char **argv)
        argc -= 2;
        argv += 2;
 
-       if ((zhp = zpool_open(g_zfs, srcpool)) == NULL)
+       if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
+               nvlist_free(props);
                return (1);
+       }
 
        config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
        if (config == NULL) {
@@ -4873,20 +5528,32 @@ zpool_do_split(int argc, char **argv)
                        print_vdev_tree(NULL, newpool, config, 0, B_FALSE,
                            flags.name_flags);
                }
-               nvlist_free(config);
        }
 
        zpool_close(zhp);
 
-       if (ret != 0 || flags.dryrun || !flags.import)
+       if (ret != 0 || flags.dryrun || !flags.import) {
+               nvlist_free(config);
+               nvlist_free(props);
                return (ret);
+       }
 
        /*
         * The split was successful. Now we need to open the new
         * pool and import it.
         */
-       if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL)
+       if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
+               nvlist_free(config);
+               nvlist_free(props);
                return (1);
+       }
+
+       if (loadkeys) {
+               ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
+               if (ret != 0)
+                       ret = 1;
+       }
+
        if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
            zpool_enable_datasets(zhp, mntopts, 0) != 0) {
                ret = 1;
@@ -4896,6 +5563,8 @@ zpool_do_split(int argc, char **argv)
                    "different altroot\n"), "zpool import");
        }
        zpool_close(zhp);
+       nvlist_free(config);
+       nvlist_free(props);
 
        return (ret);
 }
@@ -4975,11 +5644,9 @@ zpool_do_online(int argc, char **argv)
 /*
  * zpool offline [-ft] <pool> <device> ...
  *
- *     -f      Force the device into the offline state, even if doing
- *             so would appear to compromise pool availability.
- *             (not supported yet)
+ *     -f      Force the device into a faulted state.
  *
- *     -t      Only take the device off-line temporarily.  The offline
+ *     -t      Only take the device off-line temporarily.  The offline/faulted
  *             state will not be persistent across reboots.
  */
 /* ARGSUSED */
@@ -4991,14 +5658,17 @@ zpool_do_offline(int argc, char **argv)
        zpool_handle_t *zhp;
        int ret = 0;
        boolean_t istmp = B_FALSE;
+       boolean_t fault = B_FALSE;
 
        /* check options */
        while ((c = getopt(argc, argv, "ft")) != -1) {
                switch (c) {
+               case 'f':
+                       fault = B_TRUE;
+                       break;
                case 't':
                        istmp = B_TRUE;
                        break;
-               case 'f':
                case '?':
                        (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                            optopt);
@@ -5025,8 +5695,22 @@ zpool_do_offline(int argc, char **argv)
                return (1);
 
        for (i = 1; i < argc; i++) {
-               if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
-                       ret = 1;
+               if (fault) {
+                       uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
+                       vdev_aux_t aux;
+                       if (istmp == B_FALSE) {
+                               /* Force the fault to persist across imports */
+                               aux = VDEV_AUX_EXTERNAL_PERSIST;
+                       } else {
+                               aux = VDEV_AUX_EXTERNAL;
+                       }
+
+                       if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
+                               ret = 1;
+               } else {
+                       if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
+                               ret = 1;
+               }
        }
 
        zpool_close(zhp);
@@ -5175,12 +5859,14 @@ zpool_do_reopen(int argc, char **argv)
 {
        int c;
        int ret = 0;
-       zpool_handle_t *zhp;
-       char *pool;
+       boolean_t scrub_restart = B_TRUE;
 
        /* check options */
-       while ((c = getopt(argc, argv, "")) != -1) {
+       while ((c = getopt(argc, argv, "n")) != -1) {
                switch (c) {
+               case 'n':
+                       scrub_restart = B_FALSE;
+                       break;
                case '?':
                        (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                            optopt);
@@ -5188,25 +5874,13 @@ zpool_do_reopen(int argc, char **argv)
                }
        }
 
-       argc--;
-       argv++;
-
-       if (argc < 1) {
-               (void) fprintf(stderr, gettext("missing pool name\n"));
-               usage(B_FALSE);
-       }
-
-       if (argc > 1) {
-               (void) fprintf(stderr, gettext("too many arguments\n"));
-               usage(B_FALSE);
-       }
+       argc -= optind;
+       argv += optind;
 
-       pool = argv[0];
-       if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL)
-               return (1);
+       /* if argc == 0 we will execute zpool_reopen_one on all pools */
+       ret = for_each_pool(argc, argv, B_TRUE, NULL, zpool_reopen_one,
+           &scrub_restart);
 
-       ret = zpool_reopen(zhp);
-       zpool_close(zhp);
        return (ret);
 }
 
@@ -5214,6 +5888,7 @@ typedef struct scrub_cbdata {
        int     cb_type;
        int     cb_argc;
        char    **cb_argv;
+       pool_scrub_cmd_t cb_scrub_cmd;
 } scrub_cbdata_t;
 
 int
@@ -5231,15 +5906,16 @@ scrub_callback(zpool_handle_t *zhp, void *data)
                return (1);
        }
 
-       err = zpool_scan(zhp, cb->cb_type);
+       err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
 
        return (err != 0);
 }
 
 /*
- * zpool scrub [-s] <pool> ...
+ * zpool scrub [-s | -p] <pool> ...
  *
  *     -s      Stop.  Stops any in-progress scrub.
+ *     -p      Pause.  Pauses an in-progress scrub.
  */
 int
 zpool_do_scrub(int argc, char **argv)
@@ -5248,13 +5924,17 @@ zpool_do_scrub(int argc, char **argv)
        scrub_cbdata_t cb;
 
        cb.cb_type = POOL_SCAN_SCRUB;
+       cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
 
        /* check options */
-       while ((c = getopt(argc, argv, "s")) != -1) {
+       while ((c = getopt(argc, argv, "sp")) != -1) {
                switch (c) {
                case 's':
                        cb.cb_type = POOL_SCAN_NONE;
                        break;
+               case 'p':
+                       cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
+                       break;
                case '?':
                        (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                            optopt);
@@ -5262,6 +5942,13 @@ zpool_do_scrub(int argc, char **argv)
                }
        }
 
+       if (cb.cb_type == POOL_SCAN_NONE &&
+           cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
+               (void) fprintf(stderr, gettext("invalid option combination: "
+                   "-s and -p are mutually exclusive\n"));
+               usage(B_FALSE);
+       }
+
        cb.cb_argc = argc;
        cb.cb_argv = argv;
        argc -= optind;
@@ -5275,28 +5962,20 @@ zpool_do_scrub(int argc, char **argv)
        return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
 }
 
-typedef struct status_cbdata {
-       int             cb_count;
-       int             cb_name_flags;
-       boolean_t       cb_allpools;
-       boolean_t       cb_verbose;
-       boolean_t       cb_explain;
-       boolean_t       cb_first;
-       boolean_t       cb_dedup_stats;
-} status_cbdata_t;
-
 /*
  * Print out detailed scrub status.
  */
 void
 print_scan_status(pool_scan_stat_t *ps)
 {
-       time_t start, end;
-       uint64_t elapsed, mins_left, hours_left;
-       uint64_t pass_exam, examined, total;
-       uint_t rate;
+       time_t start, end, pause;
+       uint64_t total_secs_left;
+       uint64_t elapsed, secs_left, mins_left, hours_left, days_left;
+       uint64_t pass_scanned, scanned, pass_issued, issued, total;
+       uint_t scan_rate, issue_rate;
        double fraction_done;
-       char processed_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
+       char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
+       char srate_buf[7], irate_buf[7];
 
        (void) printf(gettext("  scan: "));
 
@@ -5309,30 +5988,36 @@ print_scan_status(pool_scan_stat_t *ps)
 
        start = ps->pss_start_time;
        end = ps->pss_end_time;
-       zfs_nicenum(ps->pss_processed, processed_buf, sizeof (processed_buf));
+       pause = ps->pss_pass_scrub_pause;
+
+       zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
 
        assert(ps->pss_func == POOL_SCAN_SCRUB ||
            ps->pss_func == POOL_SCAN_RESILVER);
-       /*
-        * Scan is finished or canceled.
-        */
+
+       /* Scan is finished or canceled. */
        if (ps->pss_state == DSS_FINISHED) {
-               uint64_t minutes_taken = (end - start) / 60;
-               char *fmt = NULL;
+               total_secs_left = end - start;
+               days_left = total_secs_left / 60 / 60 / 24;
+               hours_left = (total_secs_left / 60 / 60) % 24;
+               mins_left = (total_secs_left / 60) % 60;
+               secs_left = (total_secs_left % 60);
 
                if (ps->pss_func == POOL_SCAN_SCRUB) {
-                       fmt = gettext("scrub repaired %s in %lluh%um with "
-                           "%llu errors on %s");
+                       (void) printf(gettext("scrub repaired %s "
+                           "in %llu days %02llu:%02llu:%02llu "
+                           "with %llu errors on %s"), processed_buf,
+                           (u_longlong_t)days_left, (u_longlong_t)hours_left,
+                           (u_longlong_t)mins_left, (u_longlong_t)secs_left,
+                           (u_longlong_t)ps->pss_errors, ctime(&end));
                } else if (ps->pss_func == POOL_SCAN_RESILVER) {
-                       fmt = gettext("resilvered %s in %lluh%um with "
-                           "%llu errors on %s");
+                       (void) printf(gettext("resilvered %s "
+                           "in %llu days %02llu:%02llu:%02llu "
+                           "with %llu errors on %s"), processed_buf,
+                           (u_longlong_t)days_left, (u_longlong_t)hours_left,
+                           (u_longlong_t)mins_left, (u_longlong_t)secs_left,
+                           (u_longlong_t)ps->pss_errors, ctime(&end));
                }
-               /* LINTED */
-               (void) printf(fmt, processed_buf,
-                   (u_longlong_t)(minutes_taken / 60),
-                   (uint_t)(minutes_taken % 60),
-                   (u_longlong_t)ps->pss_errors,
-                   ctime((time_t *)&end));
                return;
        } else if (ps->pss_state == DSS_CANCELED) {
                if (ps->pss_func == POOL_SCAN_SCRUB) {
@@ -5347,54 +6032,84 @@ print_scan_status(pool_scan_stat_t *ps)
 
        assert(ps->pss_state == DSS_SCANNING);
 
-       /*
-        * Scan is in progress.
-        */
+       /* Scan is in progress. Resilvers can't be paused. */
        if (ps->pss_func == POOL_SCAN_SCRUB) {
-               (void) printf(gettext("scrub in progress since %s"),
-                   ctime(&start));
+               if (pause == 0) {
+                       (void) printf(gettext("scrub in progress since %s"),
+                           ctime(&start));
+               } else {
+                       (void) printf(gettext("scrub paused since %s"),
+                           ctime(&pause));
+                       (void) printf(gettext("\tscrub started on %s"),
+                           ctime(&start));
+               }
        } else if (ps->pss_func == POOL_SCAN_RESILVER) {
                (void) printf(gettext("resilver in progress since %s"),
                    ctime(&start));
        }
 
-       examined = ps->pss_examined ? ps->pss_examined : 1;
+       scanned = ps->pss_examined;
+       pass_scanned = ps->pss_pass_exam;
+       issued = ps->pss_issued;
+       pass_issued = ps->pss_pass_issued;
        total = ps->pss_to_examine;
-       fraction_done = (double)examined / total;
 
-       /* elapsed time for this pass */
-       elapsed = time(NULL) - ps->pss_pass_start;
-       elapsed = elapsed ? elapsed : 1;
-       pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
-       rate = pass_exam / elapsed;
-       rate = rate ? rate : 1;
-       mins_left = ((total - examined) / rate) / 60;
-       hours_left = mins_left / 60;
-
-       zfs_nicenum(examined, examined_buf, sizeof (examined_buf));
-       zfs_nicenum(total, total_buf, sizeof (total_buf));
-       zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
+       /* we are only done with a block once we have issued the IO for it */
+       fraction_done = (double)issued / total;
 
-       /*
-        * do not print estimated time if hours_left is more than 30 days
-        */
-       (void) printf(gettext("\t%s scanned out of %s at %s/s"),
-           examined_buf, total_buf, rate_buf);
-       if (hours_left < (30 * 24)) {
-               (void) printf(gettext(", %lluh%um to go\n"),
-                   (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
+       /* elapsed time for this pass, rounding up to 1 if it's 0 */
+       elapsed = time(NULL) - ps->pss_pass_start;
+       elapsed -= ps->pss_pass_scrub_spent_paused;
+       elapsed = (elapsed != 0) ? elapsed : 1;
+
+       scan_rate = pass_scanned / elapsed;
+       issue_rate = pass_issued / elapsed;
+       total_secs_left = (issue_rate != 0) ?
+           ((total - issued) / issue_rate) : UINT64_MAX;
+
+       days_left = total_secs_left / 60 / 60 / 24;
+       hours_left = (total_secs_left / 60 / 60) % 24;
+       mins_left = (total_secs_left / 60) % 60;
+       secs_left = (total_secs_left % 60);
+
+       /* format all of the numbers we will be reporting */
+       zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
+       zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
+       zfs_nicebytes(total, total_buf, sizeof (total_buf));
+       zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
+       zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
+
+       /* do not print estimated time if we have a paused scrub */
+       if (pause == 0) {
+               (void) printf(gettext("\t%s scanned at %s/s, "
+                   "%s issued at %s/s, %s total\n"),
+                   scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
        } else {
-               (void) printf(gettext(
-                   ", (scan is slow, no estimated time)\n"));
+               (void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
+                   scanned_buf, issued_buf, total_buf);
        }
 
        if (ps->pss_func == POOL_SCAN_RESILVER) {
-               (void) printf(gettext("\t%s resilvered, %.2f%% done\n"),
+               (void) printf(gettext("\t%s resilvered, %.2f%% done"),
                    processed_buf, 100 * fraction_done);
        } else if (ps->pss_func == POOL_SCAN_SCRUB) {
-               (void) printf(gettext("\t%s repaired, %.2f%% done\n"),
+               (void) printf(gettext("\t%s repaired, %.2f%% done"),
                    processed_buf, 100 * fraction_done);
        }
+
+       if (pause == 0) {
+               if (issue_rate >= 10 * 1024 * 1024) {
+                       (void) printf(gettext(", %llu days "
+                           "%02llu:%02llu:%02llu to go\n"),
+                           (u_longlong_t)days_left, (u_longlong_t)hours_left,
+                           (u_longlong_t)mins_left, (u_longlong_t)secs_left);
+               } else {
+                       (void) printf(gettext(", no estimated "
+                           "completion time\n"));
+               }
+       } else {
+               (void) printf(gettext("\n"));
+       }
 }
 
 static void
@@ -5405,11 +6120,8 @@ print_error_log(zpool_handle_t *zhp)
        char *pathname;
        size_t len = MAXPATHLEN * 2;
 
-       if (zpool_get_errlog(zhp, &nverrlist) != 0) {
-               (void) printf("errors: List of errors unavailable "
-                   "(insufficient privileges)\n");
+       if (zpool_get_errlog(zhp, &nverrlist) != 0)
                return;
-       }
 
        (void) printf("errors: Permanent errors have been "
            "detected in the following files:\n\n");
@@ -5433,8 +6145,8 @@ print_error_log(zpool_handle_t *zhp)
 }
 
 static void
-print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares,
-    int namewidth, int name_flags)
+print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
+    uint_t nspares)
 {
        uint_t i;
        char *name;
@@ -5445,16 +6157,16 @@ print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares,
        (void) printf(gettext("\tspares\n"));
 
        for (i = 0; i < nspares; i++) {
-               name = zpool_vdev_name(g_zfs, zhp, spares[i], name_flags);
-               print_status_config(zhp, name, spares[i],
-                   namewidth, 2, B_TRUE, name_flags);
+               name = zpool_vdev_name(g_zfs, zhp, spares[i],
+                   cb->cb_name_flags);
+               print_status_config(zhp, cb, name, spares[i], 2, B_TRUE);
                free(name);
        }
 }
 
 static void
-print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache,
-    int namewidth, int name_flags)
+print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
+    uint_t nl2cache)
 {
        uint_t i;
        char *name;
@@ -5465,9 +6177,9 @@ print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache,
        (void) printf(gettext("\tcache\n"));
 
        for (i = 0; i < nl2cache; i++) {
-               name = zpool_vdev_name(g_zfs, zhp, l2cache[i], name_flags);
-               print_status_config(zhp, name, l2cache[i],
-                   namewidth, 2, B_FALSE, name_flags);
+               name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
+                   cb->cb_name_flags);
+               print_status_config(zhp, cb, name, l2cache[i], 2, B_FALSE);
                free(name);
        }
 }
@@ -5479,6 +6191,7 @@ print_dedup_stats(nvlist_t *config)
        ddt_stat_t *dds;
        ddt_object_t *ddo;
        uint_t c;
+       char dspace[6], mspace[6];
 
        /*
         * If the pool was faulted then we may not have been able to
@@ -5496,10 +6209,12 @@ print_dedup_stats(nvlist_t *config)
                return;
        }
 
-       (void) printf("DDT entries %llu, size %llu on disk, %llu in core\n",
+       zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
+       zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
+       (void) printf("DDT entries %llu, size %s on disk, %s in core\n",
            (u_longlong_t)ddo->ddo_count,
-           (u_longlong_t)ddo->ddo_dspace,
-           (u_longlong_t)ddo->ddo_mspace);
+           dspace,
+           mspace);
 
        verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
            (uint64_t **)&dds, &c) == 0);
@@ -5746,7 +6461,7 @@ status_callback(zpool_handle_t *zhp, void *data)
        case ZPOOL_STATUS_BAD_LOG:
                (void) printf(gettext("status: An intent log record "
                    "could not be read.\n"
-                   "\tWaiting for adminstrator intervention to fix the "
+                   "\tWaiting for administrator intervention to fix the "
                    "faulted pool.\n"));
                (void) printf(gettext("action: Either restore the affected "
                    "device(s) and run 'zpool online',\n"
@@ -5799,7 +6514,6 @@ status_callback(zpool_handle_t *zhp, void *data)
                    msgid);
 
        if (config != NULL) {
-               int namewidth;
                uint64_t nerr;
                nvlist_t **spares, **l2cache;
                uint_t nspares, nl2cache;
@@ -5809,28 +6523,32 @@ status_callback(zpool_handle_t *zhp, void *data)
                    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &c);
                print_scan_status(ps);
 
-               namewidth = max_width(zhp, nvroot, 0, 0, cbp->cb_name_flags);
-               if (namewidth < 10)
-                       namewidth = 10;
+               cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
+                   cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
+               if (cbp->cb_namewidth < 10)
+                       cbp->cb_namewidth = 10;
 
                (void) printf(gettext("config:\n\n"));
-               (void) printf(gettext("\t%-*s  %-8s %5s %5s %5s\n"), namewidth,
-                   "NAME", "STATE", "READ", "WRITE", "CKSUM");
-               print_status_config(zhp, zpool_get_name(zhp), nvroot,
-                   namewidth, 0, B_FALSE, cbp->cb_name_flags);
+               (void) printf(gettext("\t%-*s  %-8s %5s %5s %5s"),
+                   cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
+                   "CKSUM");
+
+               if (cbp->vcdl != NULL)
+                       print_cmd_columns(cbp->vcdl, 0);
+
+               printf("\n");
+               print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
+                   B_FALSE);
 
                if (num_logs(nvroot) > 0)
-                       print_logs(zhp, nvroot, namewidth, B_TRUE,
-                           cbp->cb_name_flags);
+                       print_logs(zhp, cbp, nvroot);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0)
-                       print_l2cache(zhp, l2cache, nl2cache, namewidth,
-                               cbp->cb_name_flags);
+                       print_l2cache(zhp, cbp, l2cache, nl2cache);
 
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0)
-                       print_spares(zhp, spares, nspares, namewidth,
-                           cbp->cb_name_flags);
+                       print_spares(zhp, cbp, spares, nspares);
 
                if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
                    &nerr) == 0) {
@@ -5878,8 +6596,10 @@ status_callback(zpool_handle_t *zhp, void *data)
 }
 
 /*
- * zpool status [-gLPvx] [-T d|u] [pool] ... [interval [count]]
+ * zpool status [-c [script1,script2,...]] [-gLPvx] [-T d|u] [pool] ...
+ *              [interval [count]]
  *
+ *     -c CMD  For each vdev, run command CMD
  *     -g      Display guid for individual vdev name.
  *     -L      Follow links when resolving vdev path name.
  *     -P      Display full path for vdev name.
@@ -5898,10 +6618,35 @@ zpool_do_status(int argc, char **argv)
        float interval = 0;
        unsigned long count = 0;
        status_cbdata_t cb = { 0 };
+       char *cmd = NULL;
 
        /* check options */
-       while ((c = getopt(argc, argv, "gLPvxDT:")) != -1) {
+       while ((c = getopt(argc, argv, "c:gLPvxDT:")) != -1) {
                switch (c) {
+               case 'c':
+                       if (cmd != NULL) {
+                               fprintf(stderr,
+                                   gettext("Can't set -c flag twice\n"));
+                               exit(1);
+                       }
+
+                       if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
+                           !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
+                               fprintf(stderr, gettext(
+                                   "Can't run -c, disabled by "
+                                   "ZPOOL_SCRIPTS_ENABLED.\n"));
+                               exit(1);
+                       }
+
+                       if ((getuid() <= 0 || geteuid() <= 0) &&
+                           !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
+                               fprintf(stderr, gettext(
+                                   "Can't run -c with root privileges "
+                                   "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
+                               exit(1);
+                       }
+                       cmd = optarg;
+                       break;
                case 'g':
                        cb.cb_name_flags |= VDEV_NAME_GUID;
                        break;
@@ -5924,8 +6669,13 @@ zpool_do_status(int argc, char **argv)
                        get_timestamp_arg(*optarg);
                        break;
                case '?':
-                       (void) fprintf(stderr, gettext("invalid option '%c'\n"),
-                           optopt);
+                       if (optopt == 'c') {
+                               print_zpool_script_list("status");
+                               exit(0);
+                       } else {
+                               fprintf(stderr,
+                                   gettext("invalid option '%c'\n"), optopt);
+                       }
                        usage(B_FALSE);
                }
        }
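
The -c case above (shared with zpool iostat) gates the feature in two directions: ZPOOL_SCRIPTS_ENABLED, if present, must evaluate as true, and running with root privileges additionally requires ZPOOL_SCRIPTS_AS_ROOT. The truth test itself lives in libzfs_envvar_is_set(); the sketch below assumes a simple yes/on/1 interpretation purely for illustration:

/*
 * Illustrative sketch of environment-variable gating.  The real truth test is
 * libzfs_envvar_is_set(); the yes/on/1 interpretation here is an assumption.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

static int
envvar_is_true(const char *name)
{
	char *val = getenv(name);

	if (val == NULL)
		return (0);
	return (strcasecmp(val, "yes") == 0 || strcasecmp(val, "on") == 0 ||
	    strcmp(val, "1") == 0);
}

int
main(void)
{
	if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
	    !envvar_is_true("ZPOOL_SCRIPTS_ENABLED")) {
		fprintf(stderr, "-c disabled by ZPOOL_SCRIPTS_ENABLED\n");
		return (1);
	}
	if (geteuid() == 0 && !envvar_is_true("ZPOOL_SCRIPTS_AS_ROOT")) {
		fprintf(stderr, "refusing to run -c scripts as root\n");
		return (1);
	}
	printf("-c scripts would be allowed\n");
	return (0);
}
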
@@ -5939,14 +6689,22 @@ zpool_do_status(int argc, char **argv)
                cb.cb_allpools = B_TRUE;
 
        cb.cb_first = B_TRUE;
+       cb.cb_print_status = B_TRUE;
 
        for (;;) {
                if (timestamp_fmt != NODATE)
                        print_timestamp(timestamp_fmt);
 
+               if (cmd != NULL)
+                       cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
+                           NULL, NULL, 0, 0);
+
                ret = for_each_pool(argc, argv, B_TRUE, NULL,
                    status_callback, &cb);
 
+               if (cb.vcdl != NULL)
+                       free_vdev_cmd_data_list(cb.vcdl);
+
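/*
 * Illustrative sketch only, not code from this change: with -c, each pass
 * of the status loop re-runs the per-vdev command(s) right before printing
 * and frees the collected output right after, so "zpool status -c CMD
 * <interval>" regenerates the script output on every iteration instead of
 * caching it.  Using the calls exactly as they appear above:
 */
static void
status_pass_with_scripts_sketch(int argc, char **argv, char *cmd,
    status_cbdata_t *cb)
{
	if (cmd != NULL)
		cb->vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
		    NULL, NULL, 0, 0);

	/* status_callback() consumes cb->vcdl while printing each vdev */
	(void) for_each_pool(argc, argv, B_TRUE, NULL, status_callback, cb);

	if (cb->vcdl != NULL) {
		free_vdev_cmd_data_list(cb->vcdl);
		cb->vcdl = NULL;
	}
}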
                if (argc == 0 && cb.cb_count == 0)
                        (void) fprintf(stderr, gettext("no pools available\n"));
                else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
@@ -5977,7 +6735,7 @@ typedef struct upgrade_cbdata {
 static int
 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
 {
-       int zfs_version = (int) zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
+       int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
        int *count = (int *)unsupp_fs;
 
        if (zfs_version > ZPL_VERSION) {
@@ -6016,7 +6774,7 @@ upgrade_version(zpool_handle_t *zhp, uint64_t version)
        if (unsupp_fs) {
                (void) fprintf(stderr, gettext("Upgrade not performed due "
                    "to %d unsupported filesystems (max v%d).\n"),
-                   unsupp_fs, (int) ZPL_VERSION);
+                   unsupp_fs, (int)ZPL_VERSION);
                return (1);
        }
 
@@ -6027,12 +6785,12 @@ upgrade_version(zpool_handle_t *zhp, uint64_t version)
        if (version >= SPA_VERSION_FEATURES) {
                (void) printf(gettext("Successfully upgraded "
                    "'%s' from version %llu to feature flags.\n"),
-                   zpool_get_name(zhp), (u_longlong_t) oldversion);
+                   zpool_get_name(zhp), (u_longlong_t)oldversion);
        } else {
                (void) printf(gettext("Successfully upgraded "
                    "'%s' from version %llu to version %llu.\n"),
-                   zpool_get_name(zhp), (u_longlong_t) oldversion,
-                   (u_longlong_t) version);
+                   zpool_get_name(zhp), (u_longlong_t)oldversion,
+                   (u_longlong_t)version);
        }
 
        return (0);
@@ -6239,14 +6997,14 @@ upgrade_one(zpool_handle_t *zhp, void *data)
        if (cur_version > cbp->cb_version) {
                (void) printf(gettext("Pool '%s' is already formatted "
                    "using more current version '%llu'.\n\n"),
-                   zpool_get_name(zhp), (u_longlong_t) cur_version);
+                   zpool_get_name(zhp), (u_longlong_t)cur_version);
                return (0);
        }
 
        if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
                (void) printf(gettext("Pool '%s' is already formatted "
                    "using version %llu.\n\n"), zpool_get_name(zhp),
-                   (u_longlong_t) cbp->cb_version);
+                   (u_longlong_t)cbp->cb_version);
                return (0);
        }
 
@@ -6433,7 +7191,7 @@ zpool_do_upgrade(int argc, char **argv)
                        } else {
                                (void) printf(gettext("All pools are already "
                                    "formatted with version %llu or higher.\n"),
-                                   (u_longlong_t) cb.cb_version);
+                                   (u_longlong_t)cb.cb_version);
                        }
                }
        } else if (argc == 0) {
@@ -6524,14 +7282,14 @@ get_history_one(zpool_handle_t *zhp, void *data)
                        }
                        (void) printf("%s [internal %s txg:%lld] %s", tbuf,
                            zfs_history_event_names[ievent],
-                           (longlong_t) fnvlist_lookup_uint64(
+                           (longlong_t)fnvlist_lookup_uint64(
                            rec, ZPOOL_HIST_TXG),
                            fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
                } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
                        if (!cb->internal)
                                continue;
                        (void) printf("%s [txg:%lld] %s", tbuf,
-                           (longlong_t) fnvlist_lookup_uint64(
+                           (longlong_t)fnvlist_lookup_uint64(
                            rec, ZPOOL_HIST_TXG),
                            fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
                        if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
@@ -6642,10 +7400,11 @@ typedef struct ev_opts {
        int scripted;
        int follow;
        int clear;
+       char poolname[ZFS_MAX_DATASET_NAME_LEN];
 } ev_opts_t;
 
 static void
-zpool_do_events_short(nvlist_t *nvl)
+zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
 {
        char ctime_str[26], str[32], *ptr;
        int64_t *tv;
@@ -6658,7 +7417,10 @@ zpool_do_events_short(nvlist_t *nvl)
        (void) strncpy(str+7, ctime_str+20, 4);         /* '1993' */
        (void) strncpy(str+12, ctime_str+11, 8);        /* '21:49:08' */
        (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
-       (void) printf(gettext("%s "), str);
+       if (opts->scripted)
+               (void) printf(gettext("%s\t"), str);
+       else
+               (void) printf(gettext("%s "), str);
 
        verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
        (void) printf(gettext("%s\n"), ptr);
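	/*
	 * Illustrative example only, not captured output: with the scripted
	 * flag (-H) the timestamp and class are separated by a tab rather
	 * than a space, along the lines of
	 *
	 *	May 10 2017 21:49:08.123456789<TAB>ereport.fs.zfs.checksum
	 *
	 * which keeps the output easy to split with cut(1) or awk(1).
	 */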
@@ -6737,7 +7499,20 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
 
                case DATA_TYPE_UINT64:
                        (void) nvpair_value_uint64(nvp, &i64);
-                       printf(gettext("0x%llx"), (u_longlong_t)i64);
+                       /*
+                        * translate vdev state values to readable
+                        * strings to aid zpool events consumers
+                        */
+                       if (strcmp(name,
+                           FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
+                           strcmp(name,
+                           FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
+                               printf(gettext("\"%s\" (0x%llx)"),
+                                   zpool_state_to_name(i64, VDEV_AUX_NONE),
+                                   (u_longlong_t)i64);
+                       } else {
+                               printf(gettext("0x%llx"), (u_longlong_t)i64);
+                       }
                        break;
 
                case DATA_TYPE_HRTIME:
@@ -6893,6 +7668,7 @@ zpool_do_events_next(ev_opts_t *opts)
 {
        nvlist_t *nvl;
        int zevent_fd, ret, dropped;
+       char *pool;
 
        zevent_fd = open(ZFS_DEV, O_RDWR);
        VERIFY(zevent_fd >= 0);
@@ -6909,7 +7685,12 @@ zpool_do_events_next(ev_opts_t *opts)
                if (dropped > 0)
                        (void) printf(gettext("dropped %d events\n"), dropped);
 
-               zpool_do_events_short(nvl);
+               if (strlen(opts->poolname) > 0 &&
+                   nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
+                   strcmp(opts->poolname, pool) != 0)
+                       continue;
+
+               zpool_do_events_short(nvl, opts);
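/*
 * Illustrative sketch only, not code from this change: the filter above
 * skips an event only when a pool name was given on the command line, the
 * event payload carries FM_FMRI_ZFS_POOL, and the two names differ.
 * Events with no pool in their payload therefore still print even when a
 * pool argument is supplied.  The same predicate as a file-scope helper:
 */
static boolean_t
event_matches_pool_sketch(nvlist_t *nvl, const char *poolname)
{
	char *pool;

	if (poolname[0] == '\0')
		return (B_TRUE);	/* no pool filter requested */

	if (nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) != 0)
		return (B_TRUE);	/* event is not tied to a pool */

	return (strcmp(poolname, pool) == 0 ? B_TRUE : B_FALSE);
}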
 
                if (opts->verbose) {
                        zpool_do_events_nvprint(nvl, 8);
@@ -6938,7 +7719,7 @@ zpool_do_events_clear(ev_opts_t *opts)
 }
 
 /*
- * zpool events [-vfc]
+ * zpool events [-vHf [pool] | -c]
  *
  * Displays event logs generated by ZFS.
  */
@@ -6973,6 +7754,25 @@ zpool_do_events(int argc, char **argv)
        argc -= optind;
        argv += optind;
 
+       if (argc > 1) {
+               (void) fprintf(stderr, gettext("too many arguments\n"));
+               usage(B_FALSE);
+       } else if (argc == 1) {
+               (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
+               if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
+                       (void) fprintf(stderr,
+                           gettext("invalid pool name '%s'\n"), opts.poolname);
+                       usage(B_FALSE);
+               }
+       }
+
+       if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
+           opts.clear) {
+               (void) fprintf(stderr,
+                   gettext("invalid options combined with -c\n"));
+               usage(B_FALSE);
+       }
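	/*
	 * Illustrative note only, not code from this change: after this
	 * validation the accepted invocations are
	 *
	 *	zpool events [-vHf] [pool]	print or follow events,
	 *					optionally for one pool
	 *	zpool events -c			clear the event buffer,
	 *					with no other options
	 *
	 * so e.g. "zpool events -c tank" and "zpool events -vc" both fail
	 * here with "invalid options combined with -c".
	 */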
+
        if (opts.clear)
                ret = zpool_do_events_clear(&opts);
        else
@@ -7030,7 +7830,7 @@ get_callback(zpool_handle_t *zhp, void *data)
  *             by a single tab.
  *     -o      List of columns to display.  Defaults to
  *             "name,property,value,source".
- *     -p      Diplay values in parsable (exact) format.
+ *     -p      Display values in parsable (exact) format.
  *
  * Get properties of pools in the system. Output space statistics
  * for each one as well as other attributes.
@@ -7242,7 +8042,7 @@ find_command_idx(char *command, int *idx)
 int
 main(int argc, char **argv)
 {
-       int ret;
+       int ret = 0;
        int i = 0;
        char *cmdname;
 
@@ -7250,8 +8050,6 @@ main(int argc, char **argv)
        (void) textdomain(TEXT_DOMAIN);
        srand(time(NULL));
 
-       dprintf_setup(&argc, argv);
-
        opterr = 0;
 
        /*
@@ -7294,10 +8092,17 @@ main(int argc, char **argv)
                 * 'freeze' is a vile debugging abomination, so we treat
                 * it as such.
                 */
-               char buf[16384];
-               int fd = open(ZFS_DEV, O_RDWR);
-               (void) strcpy((void *)buf, argv[2]);
-               return (!!ioctl(fd, ZFS_IOC_POOL_FREEZE, buf));
+               zfs_cmd_t zc = {"\0"};
+
+               (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
+               ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
+               if (ret != 0) {
+                       (void) fprintf(stderr,
+                           gettext("failed to freeze pool: %d\n"), errno);
+                       ret = 1;
+               }
+
+               log_history = 0;
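		/*
		 * Illustrative note only, not code from this change: the
		 * freeze path now fills a zfs_cmd_t with strlcpy(), which
		 * bounds the pool-name copy to sizeof (zc.zc_name), and
		 * issues ZFS_IOC_POOL_FREEZE through zfs_ioctl() on the
		 * shared libzfs handle rather than opening ZFS_DEV by hand
		 * and strcpy()ing into a fixed stack buffer.  Clearing
		 * log_history is assumed to keep this debugging-only
		 * command out of the pool history, matching the other
		 * unlogged subcommands in this file.
		 */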
        } else {
                (void) fprintf(stderr, gettext("unrecognized "
                    "command '%s'\n"), cmdname);