git.proxmox.com Git - mirror_zfs.git/blobdiff - module/zfs/vdev_label.c
Correct snprintf() size argument
index 48d5fc232b34f61e80111588369f2bc96c19eb65..a0e373b3dfc51850cb2ddd9f5f1cc79abbdf1636 100644
  *
  * CDDL HEADER END
  */
+
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2017, Intel Corporation.
  */
 
 /*
  *     1. Uniquely identify this device as part of a ZFS pool and confirm its
  *        identity within the pool.
  *
- *     2. Verify that all the devices given in a configuration are present
+ *     2. Verify that all the devices given in a configuration are present
  *         within the pool.
  *
- *     3. Determine the uberblock for the pool.
+ *     3. Determine the uberblock for the pool.
  *
- *     4. In case of an import operation, determine the configuration of the
+ *     4. In case of an import operation, determine the configuration of the
  *         toplevel vdev of which it is a part.
  *
- *     5. If an import operation cannot find all the devices in the pool,
+ *     5. If an import operation cannot find all the devices in the pool,
  *         provide enough information to the administrator to determine which
  *         devices are missing.
  *
@@ -76,9 +79,9 @@
  * In order to identify which labels are valid, the labels are written in the
  * following manner:
  *
- *     1. For each vdev, update 'L1' to the new label
- *     2. Update the uberblock
- *     3. For each vdev, update 'L2' to the new label
+ *     1. For each vdev, update 'L1' to the new label
+ *     2. Update the uberblock
+ *     3. For each vdev, update 'L2' to the new label
  *
  * Given arbitrary failure, we can determine the correct label to use based on
  * the transaction group.  If we fail after updating L1 but before updating the
  *
  * The nvlist describing the pool and vdev contains the following elements:
  *
- *     version         ZFS on-disk version
- *     name            Pool name
- *     state           Pool state
- *     txg             Transaction group in which this label was written
- *     pool_guid       Unique identifier for this pool
- *     vdev_tree       An nvlist describing vdev tree.
+ *     version         ZFS on-disk version
+ *     name            Pool name
+ *     state           Pool state
+ *     txg             Transaction group in which this label was written
+ *     pool_guid       Unique identifier for this pool
+ *     vdev_tree       An nvlist describing vdev tree.
+ *     features_for_read
+ *                     An nvlist of the features necessary for reading the MOS.
  *
  * Each leaf device label also contains the following:
  *
- *     top_guid        Unique ID for top-level vdev in which this is contained
- *     guid            Unique ID for the leaf vdev
+ *     top_guid        Unique ID for top-level vdev in which this is contained
+ *     guid            Unique ID for the leaf vdev
  *
  * The 'vs' configuration follows the format described in 'spa_config.c'.
  */
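/*
 * Illustrative sketch (editorial, not part of this change): once a label's
 * vp_nvlist has been unpacked, the elements listed above can be read back
 * with the ordinary nvlist accessors.  The helper name is hypothetical and
 * error handling is omitted.
 */
static void
example_dump_label(nvlist_t *label)
{
        uint64_t version, state, txg, pool_guid, top_guid, guid;
        char *name;
        nvlist_t *vdev_tree;

        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version);
        (void) nvlist_lookup_string(label, ZPOOL_CONFIG_POOL_NAME, &name);
        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state);
        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, &txg);
        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
        (void) nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree);

        /* Leaf labels additionally carry the top-level and leaf vdev GUIDs. */
        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid);
        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid);
}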
 #include <sys/vdev_impl.h>
 #include <sys/uberblock_impl.h>
 #include <sys/metaslab.h>
+#include <sys/metaslab_impl.h>
 #include <sys/zio.h>
+#include <sys/dsl_scan.h>
+#include <sys/abd.h>
 #include <sys/fs/zfs.h>
 
 /*
@@ -174,11 +182,12 @@ vdev_label_number(uint64_t psize, uint64_t offset)
 }
 
 static void
-vdev_label_read(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
-       uint64_t size, zio_done_func_t *done, void *private, int flags)
+vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
+    uint64_t size, zio_done_func_t *done, void *private, int flags)
 {
-       ASSERT(spa_config_held(zio->io_spa, SCL_STATE_ALL, RW_WRITER) ==
-           SCL_STATE_ALL);
+       ASSERT(
+           spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
+           spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
        ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
 
        zio_nowait(zio_read_phys(zio, vd,
@@ -187,14 +196,13 @@ vdev_label_read(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
            ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
 }
 
-static void
-vdev_label_write(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
-       uint64_t size, zio_done_func_t *done, void *private, int flags)
+void
+vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
+    uint64_t size, zio_done_func_t *done, void *private, int flags)
 {
-       ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL ||
-           (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
-           (SCL_CONFIG | SCL_STATE) &&
-           dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
+       ASSERT(
+           spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
+           spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
        ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
 
        zio_nowait(zio_write_phys(zio, vd,
@@ -203,39 +211,233 @@ vdev_label_write(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
            ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
 }
 
+/*
+ * Generate the nvlist representing this vdev's stats
+ */
+void
+vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
+{
+       nvlist_t *nvx;
+       vdev_stat_t *vs;
+       vdev_stat_ex_t *vsx;
+
+       vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
+       vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
+
+       vdev_get_stats_ex(vd, vs, vsx);
+       fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+           (uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
+
+       /*
+        * Add extended stats into a special extended stats nvlist.  This keeps
+        * all the extended stats nicely grouped together.  The extended stats
+        * nvlist is then added to the main nvlist.
+        */
+       nvx = fnvlist_alloc();
+
+       /* ZIOs in flight to disk */
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
+
+       /* ZIOs pending */
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
+
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
+
+       /* Histograms */
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
+           vsx->vsx_total_histo[ZIO_TYPE_READ],
+           ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
+           vsx->vsx_total_histo[ZIO_TYPE_WRITE],
+           ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
+           vsx->vsx_disk_histo[ZIO_TYPE_READ],
+           ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
+           vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
+           ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
+
+       /* Request sizes */
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
+
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
+
+       /* IO delays */
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
+
+       /* Add extended stats nvlist to main nvlist */
+       fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
+
+       fnvlist_free(nvx);
+       kmem_free(vs, sizeof (*vs));
+       kmem_free(vsx, sizeof (*vsx));
+}
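/*
 * Consumer-side sketch (hypothetical helper, not in this change): the
 * extended stats added above come back as a nested nvlist, and each
 * histogram is a plain uint64 array.
 */
static void
example_read_stats_ex(nvlist_t *nv)
{
        nvlist_t *nvx;
        uint64_t *histo;
        uint_t count;

        if (nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, &nvx) != 0)
                return;

        /* Total read latency histogram; 'count' is the number of buckets. */
        if (nvlist_lookup_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
            &histo, &count) == 0) {
                for (uint_t i = 0; i < count; i++) {
                        /* histo[i] is the sample count for latency bin i */
                }
        }
}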
+
+static void
+root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
+{
+       spa_t *spa = vd->vdev_spa;
+
+       if (vd != spa->spa_root_vdev)
+               return;
+
+       /* provide either current or previous scan information */
+       pool_scan_stat_t ps;
+       if (spa_scan_get_stats(spa, &ps) == 0) {
+               fnvlist_add_uint64_array(nvl,
+                   ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
+                   sizeof (pool_scan_stat_t) / sizeof (uint64_t));
+       }
+
+       pool_removal_stat_t prs;
+       if (spa_removal_get_stats(spa, &prs) == 0) {
+               fnvlist_add_uint64_array(nvl,
+                   ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
+                   sizeof (prs) / sizeof (uint64_t));
+       }
+
+       pool_checkpoint_stat_t pcs;
+       if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
+               fnvlist_add_uint64_array(nvl,
+                   ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
+                   sizeof (pcs) / sizeof (uint64_t));
+       }
+}
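/*
 * Sketch only (assumed consumer, roughly what 'zpool status' does): the
 * progress structures packed above are recovered by looking up the uint64
 * array and casting it back to the original struct.
 */
static void
example_read_scan_stats(nvlist_t *nvroot)
{
        pool_scan_stat_t *ps = NULL;
        uint_t c;

        (void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
            (uint64_t **)&ps, &c);
        if (ps != NULL) {
                /* ps->pss_func, ps->pss_state, etc. are now available */
        }
}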
+
 /*
  * Generate the nvlist representing this vdev's config.
  */
 nvlist_t *
 vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
-    boolean_t isspare, boolean_t isl2cache)
+    vdev_config_flag_t flags)
 {
        nvlist_t *nv = NULL;
+       vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
 
-       VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       nv = fnvlist_alloc();
 
-       VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
-           vd->vdev_ops->vdev_op_type) == 0);
-       if (!isspare && !isl2cache)
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id)
-                   == 0);
-       VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid) == 0);
+       fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
+       if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
+       fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
 
        if (vd->vdev_path != NULL)
-               VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_PATH,
-                   vd->vdev_path) == 0);
+               fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
 
        if (vd->vdev_devid != NULL)
-               VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID,
-                   vd->vdev_devid) == 0);
+               fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
 
        if (vd->vdev_physpath != NULL)
-               VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
-                   vd->vdev_physpath) == 0);
+               fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
+                   vd->vdev_physpath);
+
+       if (vd->vdev_enc_sysfs_path != NULL)
+               fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
+                   vd->vdev_enc_sysfs_path);
 
        if (vd->vdev_fru != NULL)
-               VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_FRU,
-                   vd->vdev_fru) == 0);
+               fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
 
        if (vd->vdev_nparity != 0) {
                ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
@@ -246,99 +448,316 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
                 * into a crufty old storage pool.
                 */
                ASSERT(vd->vdev_nparity == 1 ||
-                   (vd->vdev_nparity == 2 &&
-                   spa_version(spa) >= SPA_VERSION_RAID6));
+                   (vd->vdev_nparity <= 2 &&
+                   spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
+                   (vd->vdev_nparity <= 3 &&
+                   spa_version(spa) >= SPA_VERSION_RAIDZ3));
 
                /*
                 * Note that we'll add the nparity tag even on storage pools
                 * that only support a single parity device -- older software
                 * will just ignore it.
                 */
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY,
-                   vd->vdev_nparity) == 0);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
        }
 
        if (vd->vdev_wholedisk != -1ULL)
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
-                   vd->vdev_wholedisk) == 0);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+                   vd->vdev_wholedisk);
 
-       if (vd->vdev_not_present)
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1) == 0);
+       if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
 
        if (vd->vdev_isspare)
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1) == 0);
-
-       if (!isspare && !isl2cache && vd == vd->vdev_top) {
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
-                   vd->vdev_ms_array) == 0);
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
-                   vd->vdev_ms_shift) == 0);
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT,
-                   vd->vdev_ashift) == 0);
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
-                   vd->vdev_asize) == 0);
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG,
-                   vd->vdev_islog) == 0);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
+
+       if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
+           vd == vd->vdev_top) {
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
+                   vd->vdev_ms_array);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
+                   vd->vdev_ms_shift);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
+                   vd->vdev_asize);
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
+               if (vd->vdev_removing) {
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
+                           vd->vdev_removing);
+               }
+
+               /* zpool command expects alloc class data */
+               if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
+                       const char *bias = NULL;
+
+                       switch (vd->vdev_alloc_bias) {
+                       case VDEV_BIAS_LOG:
+                               bias = VDEV_ALLOC_BIAS_LOG;
+                               break;
+                       case VDEV_BIAS_SPECIAL:
+                               bias = VDEV_ALLOC_BIAS_SPECIAL;
+                               break;
+                       case VDEV_BIAS_DEDUP:
+                               bias = VDEV_ALLOC_BIAS_DEDUP;
+                               break;
+                       default:
+                               ASSERT3U(vd->vdev_alloc_bias, ==,
+                                   VDEV_BIAS_NONE);
+                       }
+                       fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
+                           bias);
+               }
+       }
+
+       if (vd->vdev_dtl_sm != NULL) {
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
+                   space_map_object(vd->vdev_dtl_sm));
+       }
+
+       if (vic->vic_mapping_object != 0) {
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
+                   vic->vic_mapping_object);
+       }
+
+       if (vic->vic_births_object != 0) {
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
+                   vic->vic_births_object);
+       }
+
+       if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
+                   vic->vic_prev_indirect_vdev);
        }
 
-       if (vd->vdev_dtl_smo.smo_object != 0)
-               VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
-                   vd->vdev_dtl_smo.smo_object) == 0);
+       if (vd->vdev_crtxg)
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
+
+       if (vd->vdev_expansion_time)
+               fnvlist_add_uint64(nv, ZPOOL_CONFIG_EXPANSION_TIME,
+                   vd->vdev_expansion_time);
+
+       if (flags & VDEV_CONFIG_MOS) {
+               if (vd->vdev_leaf_zap != 0) {
+                       ASSERT(vd->vdev_ops->vdev_op_leaf);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
+                           vd->vdev_leaf_zap);
+               }
+
+               if (vd->vdev_top_zap != 0) {
+                       ASSERT(vd == vd->vdev_top);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
+                           vd->vdev_top_zap);
+               }
+
+               if (vd->vdev_resilver_deferred) {
+                       ASSERT(vd->vdev_ops->vdev_op_leaf);
+                       ASSERT(spa->spa_resilver_deferred);
+                       fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
+               }
+       }
 
        if (getstats) {
-               vdev_stat_t vs;
-               vdev_get_stats(vd, &vs);
-               VERIFY(nvlist_add_uint64_array(nv, ZPOOL_CONFIG_STATS,
-                   (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t)) == 0);
+               vdev_config_generate_stats(vd, nv);
+
+               root_vdev_actions_getprogress(vd, nv);
+
+               /*
+                * Note: this can be called from open context
+                * (spa_get_stats()), so we need the rwlock to prevent
+                * the mapping from being changed by condensing.
+                */
+               rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
+               if (vd->vdev_indirect_mapping != NULL) {
+                       ASSERT(vd->vdev_indirect_births != NULL);
+                       vdev_indirect_mapping_t *vim =
+                           vd->vdev_indirect_mapping;
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
+                           vdev_indirect_mapping_size(vim));
+               }
+               rw_exit(&vd->vdev_indirect_rwlock);
+               if (vd->vdev_mg != NULL &&
+                   vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
+                       /*
+                        * Compute approximately how much memory would be used
+                        * for the indirect mapping if this device were to
+                        * be removed.
+                        *
+                        * Note: If the frag metric is invalid, then not
+                        * enough metaslabs have been converted to have
+                        * histograms.
+                        */
+                       uint64_t seg_count = 0;
+                       uint64_t to_alloc = vd->vdev_stat.vs_alloc;
+
+                       /*
+                        * There are the same number of allocated segments
+                        * as free segments, so we will have at least one
+                        * entry per free segment.  However, small free
+                        * segments (smaller than vdev_removal_max_span)
+                        * will be combined with adjacent allocated segments
+                        * as a single mapping.
+                        */
+                       for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+                               if (1ULL << (i + 1) < vdev_removal_max_span) {
+                                       to_alloc +=
+                                           vd->vdev_mg->mg_histogram[i] <<
+                                           (i + 1);
+                               } else {
+                                       seg_count +=
+                                           vd->vdev_mg->mg_histogram[i];
+                               }
+                       }
+
+                       /*
+                        * The maximum length of a mapping is
+                        * zfs_remove_max_segment, so we need at least one entry
+                        * per zfs_remove_max_segment of allocated data.
+                        */
+                       seg_count += to_alloc / zfs_remove_max_segment;
+
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
+                           seg_count *
+                           sizeof (vdev_indirect_mapping_entry_phys_t));
+               }
        }
 
        if (!vd->vdev_ops->vdev_op_leaf) {
                nvlist_t **child;
-               int c;
+               int c, idx;
+
+               ASSERT(!vd->vdev_ishole);
 
                child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
                    KM_SLEEP);
 
-               for (c = 0; c < vd->vdev_children; c++)
-                       child[c] = vdev_config_generate(spa, vd->vdev_child[c],
-                           getstats, isspare, isl2cache);
+               for (c = 0, idx = 0; c < vd->vdev_children; c++) {
+                       vdev_t *cvd = vd->vdev_child[c];
+
+                       /*
+                        * If we're generating an nvlist of removing
+                        * vdevs then skip over any device which is
+                        * not being removed.
+                        */
+                       if ((flags & VDEV_CONFIG_REMOVING) &&
+                           !cvd->vdev_removing)
+                               continue;
+
+                       child[idx++] = vdev_config_generate(spa, cvd,
+                           getstats, flags);
+               }
 
-               VERIFY(nvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
-                   child, vd->vdev_children) == 0);
+               if (idx) {
+                       fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+                           child, idx);
+               }
 
-               for (c = 0; c < vd->vdev_children; c++)
+               for (c = 0; c < idx; c++)
                        nvlist_free(child[c]);
 
                kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
 
        } else {
+               const char *aux = NULL;
+
                if (vd->vdev_offline && !vd->vdev_tmpoffline)
-                       VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE,
-                           B_TRUE) == 0);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
+               if (vd->vdev_resilver_txg != 0)
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
+                           vd->vdev_resilver_txg);
                if (vd->vdev_faulted)
-                       VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED,
-                           B_TRUE) == 0);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
                if (vd->vdev_degraded)
-                       VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED,
-                           B_TRUE) == 0);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
                if (vd->vdev_removed)
-                       VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED,
-                           B_TRUE) == 0);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
                if (vd->vdev_unspare)
-                       VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE,
-                           B_TRUE) == 0);
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
+               if (vd->vdev_ishole)
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
+
+               /* Set the reason why we're FAULTED/DEGRADED. */
+               switch (vd->vdev_stat.vs_aux) {
+               case VDEV_AUX_ERR_EXCEEDED:
+                       aux = "err_exceeded";
+                       break;
+
+               case VDEV_AUX_EXTERNAL:
+                       aux = "external";
+                       break;
+               }
+
+               if (aux != NULL && !vd->vdev_tmpoffline) {
+                       fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
+               } else {
+                       /*
+                        * We're healthy - clear any previous AUX_STATE values.
+                        */
+                       if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
+                               nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
+               }
+
+               if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
+                       fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
+                           vd->vdev_orig_guid);
+               }
        }
 
        return (nv);
 }
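/*
 * Usage sketch (assumed caller, not part of this change): a full config with
 * stats for the zpool command versus a trimmed config destined for the MOS.
 * VDEV_CONFIG_MOS adds the per-vdev ZAP entries shown above; passing B_FALSE
 * for getstats skips the stats nvlists entirely.
 */
static void
example_generate_configs(spa_t *spa, vdev_t *rvd)
{
        nvlist_t *full, *mos;

        full = vdev_config_generate(spa, rvd, B_TRUE, 0);
        mos = vdev_config_generate(spa, rvd, B_FALSE, VDEV_CONFIG_MOS);

        nvlist_free(full);
        nvlist_free(mos);
}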
 
+/*
+ * Generate a view of the top-level vdevs.  If we currently have holes
+ * in the namespace, then generate an array which contains a list of holey
+ * vdevs.  Additionally, add the number of top-level children that currently
+ * exist.
+ */
+void
+vdev_top_config_generate(spa_t *spa, nvlist_t *config)
+{
+       vdev_t *rvd = spa->spa_root_vdev;
+       uint64_t *array;
+       uint_t c, idx;
+
+       array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
+
+       for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
+               vdev_t *tvd = rvd->vdev_child[c];
+
+               if (tvd->vdev_ishole) {
+                       array[idx++] = c;
+               }
+       }
+
+       if (idx) {
+               VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
+                   array, idx) == 0);
+       }
+
+       VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
+           rvd->vdev_children) == 0);
+
+       kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
+}
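/*
 * Import-side sketch (hypothetical helper): the hole array and child count
 * written above let the loading code recreate hole vdevs at the recorded
 * top-level indices.
 */
static void
example_read_holes(nvlist_t *config)
{
        uint64_t *hole_array = NULL;
        uint64_t nchildren = 0;
        uint_t nholes = 0;

        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
            &nchildren);
        (void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
            &hole_array, &nholes);

        for (uint_t i = 0; i < nholes; i++) {
                /* hole_array[i] is the index of a hole top-level vdev */
        }
}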
+
+/*
+ * Returns the configuration from the label of the given vdev. For vdevs
+ * which don't have a txg value stored on their label (i.e. spares/cache)
+ * or have not been completely initialized (txg = 0) just return
+ * the configuration from the first valid label we find. Otherwise,
+ * find the most up-to-date label that does not exceed the specified
+ * 'txg' value.
+ */
 nvlist_t *
-vdev_label_read_config(vdev_t *vd)
+vdev_label_read_config(vdev_t *vd, uint64_t txg)
 {
        spa_t *spa = vd->vdev_spa;
        nvlist_t *config = NULL;
        vdev_phys_t *vp;
+       abd_t *vp_abd;
        zio_t *zio;
+       uint64_t best_txg = 0;
+       uint64_t label_txg = 0;
+       int error = 0;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
            ZIO_FLAG_SPECULATIVE;
 
@@ -347,25 +766,44 @@ vdev_label_read_config(vdev_t *vd)
        if (!vdev_readable(vd))
                return (NULL);
 
-       vp = zio_buf_alloc(sizeof (vdev_phys_t));
+       vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
+       vp = abd_to_buf(vp_abd);
 
 retry:
        for (int l = 0; l < VDEV_LABELS; l++) {
+               nvlist_t *label = NULL;
 
                zio = zio_root(spa, NULL, NULL, flags);
 
-               vdev_label_read(zio, vd, l, vp,
+               vdev_label_read(zio, vd, l, vp_abd,
                    offsetof(vdev_label_t, vl_vdev_phys),
                    sizeof (vdev_phys_t), NULL, NULL, flags);
 
                if (zio_wait(zio) == 0 &&
                    nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist),
-                   &config, 0) == 0)
-                       break;
+                   &label, 0) == 0) {
+                       /*
+                        * Auxiliary vdevs won't have txg values in their
+                        * labels and newly added vdevs may not have been
+                        * completely initialized so just return the
+                        * configuration from the first valid label we
+                        * encounter.
+                        */
+                       error = nvlist_lookup_uint64(label,
+                           ZPOOL_CONFIG_POOL_TXG, &label_txg);
+                       if ((error || label_txg == 0) && !config) {
+                               config = label;
+                               break;
+                       } else if (label_txg <= txg && label_txg > best_txg) {
+                               best_txg = label_txg;
+                               nvlist_free(config);
+                               config = fnvlist_dup(label);
+                       }
+               }
 
-               if (config != NULL) {
-                       nvlist_free(config);
-                       config = NULL;
+               if (label != NULL) {
+                       nvlist_free(label);
+                       label = NULL;
                }
        }
 
@@ -374,7 +812,16 @@ retry:
                goto retry;
        }
 
-       zio_buf_free(vp, sizeof (vdev_phys_t));
+       /*
+        * We found a valid label but it didn't pass txg restrictions.
+        */
+       if (config == NULL && label_txg != 0) {
+               vdev_dbgmsg(vd, "label discarded as txg is too large "
+                   "(%llu > %llu)", (u_longlong_t)label_txg,
+                   (u_longlong_t)txg);
+       }
+
+       abd_free(vp_abd);
 
        return (config);
 }
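/*
 * Caller sketch (not part of this change): a txg of UINT64_MAX (or -1ULL)
 * means "no restriction", returning the newest valid label; a real txg
 * bounds the search as described in the comment above the function.
 */
static nvlist_t *
example_read_any_label(vdev_t *vd)
{
        nvlist_t *config = vdev_label_read_config(vd, UINT64_MAX);

        if (config == NULL) {
                /* no valid label found, or the vdev was unreadable */
        }
        return (config);
}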
@@ -400,7 +847,7 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
        /*
         * Read the label, if any, and perform some basic sanity checks.
         */
-       if ((label = vdev_label_read_config(vd)) == NULL)
+       if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
                return (B_FALSE);
 
        (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
@@ -468,6 +915,8 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
 
                case VDEV_LABEL_SPARE:
                        return (spa_has_spare(spa, device_guid));
+               default:
+                       break;
                }
        }
 
@@ -477,6 +926,16 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
        if (spa_l2cache_exists(device_guid, NULL))
                return (B_TRUE);
 
+       /*
+        * We can't rely on a pool's state if it's been imported
+        * read-only.  Instead we look to see if the pool is marked
+        * read-only in the namespace and set the state to active.
+        */
+       if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
+           (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
+           spa_mode(spa) == FREAD)
+               state = POOL_STATE_ACTIVE;
+
        /*
         * If the device is marked ACTIVE, then this device is in use by another
         * pool on the system.
@@ -498,13 +957,15 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
        spa_t *spa = vd->vdev_spa;
        nvlist_t *label;
        vdev_phys_t *vp;
-       char *pad2;
+       abd_t *vp_abd;
+       abd_t *pad2;
        uberblock_t *ub;
+       abd_t *ub_abd;
        zio_t *zio;
        char *buf;
        size_t buflen;
        int error;
-       uint64_t spare_guid, l2cache_guid;
+       uint64_t spare_guid = 0, l2cache_guid = 0;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
 
        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
@@ -514,21 +975,24 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
                    crtxg, reason)) != 0)
                        return (error);
 
-       if (!vd->vdev_ops->vdev_op_leaf)
+       /* Track the creation time for this vdev */
+       vd->vdev_crtxg = crtxg;
+
+       if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
                return (0);
 
        /*
         * Dead vdevs cannot be initialized.
         */
        if (vdev_is_dead(vd))
-               return (EIO);
+               return (SET_ERROR(EIO));
 
        /*
         * Determine if the vdev is in use.
         */
-       if (reason != VDEV_LABEL_REMOVE &&
+       if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
            vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
-               return (EBUSY);
+               return (SET_ERROR(EBUSY));
 
        /*
         * If this is a request to add or replace a spare or l2cache device
@@ -552,7 +1016,8 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
                 */
                if (reason == VDEV_LABEL_SPARE)
                        return (0);
-               ASSERT(reason == VDEV_LABEL_REPLACE);
+               ASSERT(reason == VDEV_LABEL_REPLACE ||
+                   reason == VDEV_LABEL_SPLIT);
        }
 
        if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
@@ -577,8 +1042,9 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
        /*
         * Initialize its label.
         */
-       vp = zio_buf_alloc(sizeof (vdev_phys_t));
-       bzero(vp, sizeof (vdev_phys_t));
+       vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
+       abd_zero(vp_abd, sizeof (vdev_phys_t));
+       vp = abd_to_buf(vp_abd);
 
        /*
         * Generate a label describing the pool and our top-level vdev.
@@ -617,7 +1083,11 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
                    vd->vdev_guid) == 0);
        } else {
-               label = spa_config_generate(spa, vd, 0ULL, B_FALSE);
+               uint64_t txg = 0ULL;
+
+               if (reason == VDEV_LABEL_SPLIT)
+                       txg = spa->spa_uberblock.ub_txg;
+               label = spa_config_generate(spa, vd, txg, B_FALSE);
 
                /*
                 * Add our creation time.  This allows us to detect multiple
@@ -634,22 +1104,23 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
        error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
        if (error != 0) {
                nvlist_free(label);
-               zio_buf_free(vp, sizeof (vdev_phys_t));
+               abd_free(vp_abd);
                /* EFAULT means nvlist_pack ran out of room */
-               return (error == EFAULT ? ENAMETOOLONG : EINVAL);
+               return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
        }
 
        /*
         * Initialize uberblock template.
         */
-       ub = zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd));
-       bzero(ub, VDEV_UBERBLOCK_SIZE(vd));
-       *ub = spa->spa_uberblock;
+       ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
+       abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
+       abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
+       ub = abd_to_buf(ub_abd);
        ub->ub_txg = 0;
 
        /* Initialize the 2nd padding area. */
-       pad2 = zio_buf_alloc(VDEV_PAD_SIZE);
-       bzero(pad2, VDEV_PAD_SIZE);
+       pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
+       abd_zero(pad2, VDEV_PAD_SIZE);
 
        /*
         * Write everything in parallel.
@@ -659,7 +1130,7 @@ retry:
 
        for (int l = 0; l < VDEV_LABELS; l++) {
 
-               vdev_label_write(zio, vd, l, vp,
+               vdev_label_write(zio, vd, l, vp_abd,
                    offsetof(vdev_label_t, vl_vdev_phys),
                    sizeof (vdev_phys_t), NULL, NULL, flags);
 
@@ -672,11 +1143,9 @@ retry:
                    offsetof(vdev_label_t, vl_pad2),
                    VDEV_PAD_SIZE, NULL, NULL, flags);
 
-               for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
-                       vdev_label_write(zio, vd, l, ub,
-                           VDEV_UBERBLOCK_OFFSET(vd, n),
-                           VDEV_UBERBLOCK_SIZE(vd), NULL, NULL, flags);
-               }
+               vdev_label_write(zio, vd, l, ub_abd,
+                   offsetof(vdev_label_t, vl_uberblock),
+                   VDEV_UBERBLOCK_RING, NULL, NULL, flags);
        }
 
        error = zio_wait(zio);
@@ -687,9 +1156,9 @@ retry:
        }
 
        nvlist_free(label);
-       zio_buf_free(pad2, VDEV_PAD_SIZE);
-       zio_buf_free(ub, VDEV_UBERBLOCK_SIZE(vd));
-       zio_buf_free(vp, sizeof (vdev_phys_t));
+       abd_free(pad2);
+       abd_free(ub_abd);
+       abd_free(vp_abd);
 
        /*
         * If this vdev hasn't been previously identified as a spare, then we
@@ -716,93 +1185,210 @@ retry:
  * ==========================================================================
  */
 
-/*
- * For use by zdb and debugging purposes only
- */
-uint64_t ub_max_txg = UINT64_MAX;
-
 /*
  * Consider the following situation: txg is safely synced to disk.  We've
  * written the first uberblock for txg + 1, and then we lose power.  When we
  * come back up, we fail to see the uberblock for txg + 1 because, say,
  * it was on a mirrored device and the replica to which we wrote txg + 1
  * is now offline.  If we then make some changes and sync txg + 1, and then
- * the missing replica comes back, then for a new seconds we'll have two
+ * the missing replica comes back, then for a few seconds we'll have two
  * conflicting uberblocks on disk with the same txg.  The solution is simple:
  * among uberblocks with equal txg, choose the one with the latest timestamp.
  */
 static int
-vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
+vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
 {
-       if (ub1->ub_txg < ub2->ub_txg)
-               return (-1);
-       if (ub1->ub_txg > ub2->ub_txg)
-               return (1);
+       int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);
 
-       if (ub1->ub_timestamp < ub2->ub_timestamp)
-               return (-1);
-       if (ub1->ub_timestamp > ub2->ub_timestamp)
-               return (1);
+       if (likely(cmp))
+               return (cmp);
 
-       return (0);
+       cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
+       if (likely(cmp))
+               return (cmp);
+
+       /*
+        * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
+        * ZFS, e.g. zfsonlinux >= 0.7.
+        *
+        * If one ub has MMP and the other does not, they were written by
+        * different hosts, which matters for MMP.  So we treat no MMP/no SEQ as
+        * a 0 value.
+        *
+        * Since timestamp and txg are the same if we get this far, either is
+        * acceptable for importing the pool.
+        */
+       unsigned int seq1 = 0;
+       unsigned int seq2 = 0;
+
+       if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
+               seq1 = MMP_SEQ(ub1);
+
+       if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
+               seq2 = MMP_SEQ(ub2);
+
+       return (AVL_CMP(seq1, seq2));
 }
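/*
 * Minimal sketch of how the comparator above can be used to select the best
 * of several candidate uberblocks (hypothetical helper; the real selection
 * happens in vdev_uberblock_load_done() below).
 */
static const uberblock_t *
example_pick_best(const uberblock_t *ubs, int count)
{
        const uberblock_t *best = &ubs[0];

        for (int i = 1; i < count; i++) {
                /* >0 means ubs[i] is newer: higher txg, then timestamp, seq */
                if (vdev_uberblock_compare(&ubs[i], best) > 0)
                        best = &ubs[i];
        }
        return (best);
}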
 
+struct ubl_cbdata {
+       uberblock_t     *ubl_ubbest;    /* Best uberblock */
+       vdev_t          *ubl_vd;        /* vdev associated with the above */
+};
+
 static void
 vdev_uberblock_load_done(zio_t *zio)
 {
+       vdev_t *vd = zio->io_vd;
+       spa_t *spa = zio->io_spa;
        zio_t *rio = zio->io_private;
-       uberblock_t *ub = zio->io_data;
-       uberblock_t *ubbest = rio->io_private;
+       uberblock_t *ub = abd_to_buf(zio->io_abd);
+       struct ubl_cbdata *cbp = rio->io_private;
 
-       ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(zio->io_vd));
+       ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
 
        if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
                mutex_enter(&rio->io_lock);
-               if (ub->ub_txg <= ub_max_txg &&
-                   vdev_uberblock_compare(ub, ubbest) > 0)
-                       *ubbest = *ub;
+               if (ub->ub_txg <= spa->spa_load_max_txg &&
+                   vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
+                       /*
+                        * Keep track of the vdev in which this uberblock
+                        * was found. We will use this information later
+                        * to obtain the config nvlist associated with
+                        * this uberblock.
+                        */
+                       *cbp->ubl_ubbest = *ub;
+                       cbp->ubl_vd = vd;
+               }
                mutex_exit(&rio->io_lock);
        }
 
-       zio_buf_free(zio->io_data, zio->io_size);
+       abd_free(zio->io_abd);
 }
 
-void
-vdev_uberblock_load(zio_t *zio, vdev_t *vd, uberblock_t *ubbest)
+static void
+vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
+    struct ubl_cbdata *cbp)
 {
-       spa_t *spa = vd->vdev_spa;
-       vdev_t *rvd = spa->spa_root_vdev;
-       int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
-           ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
-
-       if (vd == rvd) {
-               ASSERT(zio == NULL);
-               spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
-               zio = zio_root(spa, NULL, ubbest, flags);
-               bzero(ubbest, sizeof (uberblock_t));
-       }
-
-       ASSERT(zio != NULL);
-
        for (int c = 0; c < vd->vdev_children; c++)
-               vdev_uberblock_load(zio, vd->vdev_child[c], ubbest);
+               vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
 
        if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
                for (int l = 0; l < VDEV_LABELS; l++) {
                        for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
                                vdev_label_read(zio, vd, l,
-                                   zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)),
-                                   VDEV_UBERBLOCK_OFFSET(vd, n),
+                                   abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
+                                   B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
                                    VDEV_UBERBLOCK_SIZE(vd),
                                    vdev_uberblock_load_done, zio, flags);
                        }
                }
        }
+}
 
-       if (vd == rvd) {
-               (void) zio_wait(zio);
-               spa_config_exit(spa, SCL_ALL, FTAG);
+/*
+ * Reads the 'best' uberblock from disk along with its associated
+ * configuration. First, we read the uberblock array of each label of each
+ * vdev, keeping track of the uberblock with the highest txg in each array.
+ * Then, we read the configuration from the same vdev as the best uberblock.
+ */
+void
+vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
+{
+       zio_t *zio;
+       spa_t *spa = rvd->vdev_spa;
+       struct ubl_cbdata cb;
+       int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
+           ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
+
+       ASSERT(ub);
+       ASSERT(config);
+
+       bzero(ub, sizeof (uberblock_t));
+       *config = NULL;
+
+       cb.ubl_ubbest = ub;
+       cb.ubl_vd = NULL;
+
+       spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+       zio = zio_root(spa, NULL, &cb, flags);
+       vdev_uberblock_load_impl(zio, rvd, flags, &cb);
+       (void) zio_wait(zio);
+
+       /*
+        * It's possible that the best uberblock was discovered on a label
+        * that has a configuration which was written in a future txg.
+        * Search all labels on this vdev to find the configuration that
+        * matches the txg for our uberblock.
+        */
+       if (cb.ubl_vd != NULL) {
+               vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
+                   "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
+
+               *config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
+               if (*config == NULL && spa->spa_extreme_rewind) {
+                       vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
+                           "Trying again without txg restrictions.");
+                       *config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
+               }
+               if (*config == NULL) {
+                       vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
+               }
        }
+       spa_config_exit(spa, SCL_ALL, FTAG);
+}
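/*
 * Import-path sketch (assumed caller, roughly what pool load does): fetch
 * the best uberblock and its matching label config in one step.
 */
static int
example_load_best(spa_t *spa)
{
        uberblock_t ub;
        nvlist_t *config = NULL;

        vdev_uberblock_load(spa->spa_root_vdev, &ub, &config);

        if (ub.ub_txg == 0)
                return (SET_ERROR(ENXIO));      /* no valid uberblock found */
        if (config == NULL)
                return (SET_ERROR(EIO));        /* uberblock found, config not */

        nvlist_free(config);
        return (0);
}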
+
+/*
+ * For use when a leaf vdev is expanded.
+ * The location of labels 2 and 3 changed, and at the new location the
+ * uberblock rings are either empty or contain garbage.  The sync will write
+ * new configs there because the vdev is dirty, but expansion also needs the
+ * uberblock rings copied.  Read them from label 0 which did not move.
+ *
+ * Since the point is to populate labels {2,3} with valid uberblocks,
+ * we zero uberblocks we fail to read or which are not valid.
+ */
+
+static void
+vdev_copy_uberblocks(vdev_t *vd)
+{
+       abd_t *ub_abd;
+       zio_t *write_zio;
+       int locks = (SCL_L2ARC | SCL_ZIO);
+       int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
+           ZIO_FLAG_SPECULATIVE;
+
+       ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
+           SCL_STATE);
+       ASSERT(vd->vdev_ops->vdev_op_leaf);
+
+       spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);
+
+       ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
+
+       write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
+       for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
+               const int src_label = 0;
+               zio_t *zio;
+
+               zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
+               vdev_label_read(zio, vd, src_label, ub_abd,
+                   VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
+                   NULL, NULL, flags);
+
+               if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
+                       abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
+
+               for (int l = 2; l < VDEV_LABELS; l++)
+                       vdev_label_write(write_zio, vd, l, ub_abd,
+                           VDEV_UBERBLOCK_OFFSET(vd, n),
+                           VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
+                           flags | ZIO_FLAG_DONT_PROPAGATE);
+       }
+       (void) zio_wait(write_zio);
+
+       spa_config_exit(vd->vdev_spa, locks, FTAG);
+
+       abd_free(ub_abd);
 }
 
 /*
@@ -815,20 +1401,20 @@ vdev_uberblock_sync_done(zio_t *zio)
        uint64_t *good_writes = zio->io_private;
 
        if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
-               atomic_add_64(good_writes, 1);
+               atomic_inc_64(good_writes);
 }
 
 /*
  * Write the uberblock to all labels of all leaves of the specified vdev.
  */
 static void
-vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
+vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
+    uberblock_t *ub, vdev_t *vd, int flags)
 {
-       uberblock_t *ubbuf;
-       int n;
-
-       for (int c = 0; c < vd->vdev_children; c++)
-               vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);
+       for (uint64_t c = 0; c < vd->vdev_children; c++) {
+               vdev_uberblock_sync(zio, good_writes,
+                   ub, vd->vdev_child[c], flags);
+       }
 
        if (!vd->vdev_ops->vdev_op_leaf)
                return;
@@ -836,21 +1422,31 @@ vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
        if (!vdev_writeable(vd))
                return;
 
-       n = ub->ub_txg & (VDEV_UBERBLOCK_COUNT(vd) - 1);
+       /* If the vdev was expanded, need to copy uberblock rings. */
+       if (vd->vdev_state == VDEV_STATE_HEALTHY &&
+           vd->vdev_copy_uberblocks == B_TRUE) {
+               vdev_copy_uberblocks(vd);
+               vd->vdev_copy_uberblocks = B_FALSE;
+       }
+
+       int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
+       int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
 
-       ubbuf = zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd));
-       bzero(ubbuf, VDEV_UBERBLOCK_SIZE(vd));
-       *ubbuf = *ub;
+       /* Copy the uberblock_t into the ABD */
+       abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
+       abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
+       abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
 
        for (int l = 0; l < VDEV_LABELS; l++)
-               vdev_label_write(zio, vd, l, ubbuf,
+               vdev_label_write(zio, vd, l, ub_abd,
                    VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
-                   vdev_uberblock_sync_done, zio->io_private,
+                   vdev_uberblock_sync_done, good_writes,
                    flags | ZIO_FLAG_DONT_PROPAGATE);
 
-       zio_buf_free(ubbuf, VDEV_UBERBLOCK_SIZE(vd));
+       abd_free(ub_abd);
 }
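/*
 * Worked example of the slot selection above (editorial sketch; assumes the
 * uberblock ring is 128 KiB and MMP_BLOCKS_PER_LABEL is 1, as in this code
 * base).  With ashift = 12 each uberblock slot is 4 KiB, so the ring holds
 * 32 slots; with multihost enabled one slot is reserved for MMP writes,
 * leaving 31 txg-addressed slots.
 */
static int
example_uberblock_slot(vdev_t *vd, uint64_t txg)
{
        int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;

        /* e.g. txg 1000 with 31 usable slots -> slot 1000 % 31 = 8 */
        return ((int)(txg % (VDEV_UBERBLOCK_COUNT(vd) - m)));
}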
 
+/* Sync the uberblocks to all vdevs in svd[] */
 int
 vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
 {
@@ -858,10 +1454,10 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
        zio_t *zio;
        uint64_t good_writes = 0;
 
-       zio = zio_root(spa, NULL, &good_writes, flags);
+       zio = zio_root(spa, NULL, NULL, flags);
 
        for (int v = 0; v < svdcount; v++)
-               vdev_uberblock_sync(zio, ub, svd[v], flags);
+               vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);
 
        (void) zio_wait(zio);
 
@@ -872,8 +1468,11 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
         */
        zio = zio_root(spa, NULL, NULL, flags);
 
-       for (int v = 0; v < svdcount; v++)
-               zio_flush(zio, svd[v]);
+       for (int v = 0; v < svdcount; v++) {
+               if (vdev_writeable(svd[v])) {
+                       zio_flush(zio, svd[v]);
+               }
+       }
 
        (void) zio_wait(zio);
 
@@ -889,7 +1488,7 @@ vdev_label_sync_done(zio_t *zio)
        uint64_t *good_writes = zio->io_private;
 
        if (zio->io_error == 0)
-               atomic_add_64(good_writes, 1);
+               atomic_inc_64(good_writes);
 }
 
 /*
@@ -901,7 +1500,7 @@ vdev_label_sync_top_done(zio_t *zio)
        uint64_t *good_writes = zio->io_private;
 
        if (*good_writes == 0)
-               zio->io_error = EIO;
+               zio->io_error = SET_ERROR(EIO);
 
        kmem_free(good_writes, sizeof (uint64_t));
 }
@@ -919,15 +1518,19 @@ vdev_label_sync_ignore_done(zio_t *zio)
  * Write all even or odd labels to all leaves of the specified vdev.
  */
 static void
-vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
+vdev_label_sync(zio_t *zio, uint64_t *good_writes,
+    vdev_t *vd, int l, uint64_t txg, int flags)
 {
        nvlist_t *label;
        vdev_phys_t *vp;
+       abd_t *vp_abd;
        char *buf;
        size_t buflen;
 
-       for (int c = 0; c < vd->vdev_children; c++)
-               vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags);
+       for (int c = 0; c < vd->vdev_children; c++) {
+               vdev_label_sync(zio, good_writes,
+                   vd->vdev_child[c], l, txg, flags);
+       }
 
        if (!vd->vdev_ops->vdev_op_leaf)
                return;
@@ -940,23 +1543,24 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
         */
        label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
 
-       vp = zio_buf_alloc(sizeof (vdev_phys_t));
-       bzero(vp, sizeof (vdev_phys_t));
+       vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
+       abd_zero(vp_abd, sizeof (vdev_phys_t));
+       vp = abd_to_buf(vp_abd);
 
        buf = vp->vp_nvlist;
        buflen = sizeof (vp->vp_nvlist);
 
-       if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0) {
+       if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
                for (; l < VDEV_LABELS; l += 2) {
-                       vdev_label_write(zio, vd, l, vp,
+                       vdev_label_write(zio, vd, l, vp_abd,
                            offsetof(vdev_label_t, vl_vdev_phys),
                            sizeof (vdev_phys_t),
-                           vdev_label_sync_done, zio->io_private,
+                           vdev_label_sync_done, good_writes,
                            flags | ZIO_FLAG_DONT_PROPAGATE);
                }
        }
 
-       zio_buf_free(vp, sizeof (vdev_phys_t));
+       abd_free(vp_abd);
        nvlist_free(label);
 }
 
@@ -974,13 +1578,16 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
        zio = zio_root(spa, NULL, NULL, flags);
 
        for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
-               uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
-                   KM_SLEEP);
+               uint64_t *good_writes;
+
+               ASSERT(!vd->vdev_ishole);
+
+               good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
                zio_t *vio = zio_null(zio, spa, NULL,
                    (vd->vdev_islog || vd->vdev_aux != NULL) ?
                    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
                    good_writes, flags);
-               vdev_label_sync(vio, vd, l, txg, flags);
+               vdev_label_sync(vio, good_writes, vd, l, txg, flags);
                zio_nowait(vio);
        }
 
@@ -1011,15 +1618,15 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
  * at any time, you can just call it again, and it will resume its work.
  */
 int
-vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
+vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
 {
        spa_t *spa = svd[0]->vdev_spa;
        uberblock_t *ub = &spa->spa_uberblock;
-       vdev_t *vd;
-       zio_t *zio;
-       int error;
+       int error = 0;
        int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
 
+       ASSERT(svdcount != 0);
+retry:
        /*
         * Normally, we don't want to try too hard to write every label and
         * uberblock.  If there is a flaky disk, we don't want the rest of the
@@ -1027,8 +1634,11 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
         * single label out, we should retry with ZIO_FLAG_TRYHARD before
         * bailing out and declaring the pool faulted.
         */
-       if (tryhard)
+       if (error != 0) {
+               if ((flags & ZIO_FLAG_TRYHARD) != 0)
+                       return (error);
                flags |= ZIO_FLAG_TRYHARD;
+       }
 
        ASSERT(ub->ub_txg <= txg);
 
@@ -1038,10 +1648,13 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
         * and the vdev configuration hasn't changed,
         * then there's nothing to do.
         */
-       if (ub->ub_txg < txg &&
-           uberblock_update(ub, spa->spa_root_vdev, txg) == B_FALSE &&
-           list_is_empty(&spa->spa_config_dirty_list))
-               return (0);
+       if (ub->ub_txg < txg) {
+               boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
+                   txg, spa->spa_mmp.mmp_delay);
+
+               if (!changed && list_is_empty(&spa->spa_config_dirty_list))
+                       return (0);
+       }
 
        if (txg > spa_freeze_txg(spa))
                return (0);
@@ -1054,9 +1667,10 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
         * written in this txg will be committed to stable storage
         * before any uberblock that references them.
         */
-       zio = zio_root(spa, NULL, NULL, flags);
+       zio_t *zio = zio_root(spa, NULL, NULL, flags);
 
-       for (vd = txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd;
+       for (vdev_t *vd =
+           txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
            vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
                zio_flush(zio, vd);
 
@@ -1071,8 +1685,14 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
         * the new labels to disk to ensure that all even-label updates
         * are committed to stable storage before the uberblock update.
         */
-       if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0)
-               return (error);
+       if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
+               if ((flags & ZIO_FLAG_TRYHARD) != 0) {
+                       zfs_dbgmsg("vdev_label_sync_list() returned error %d "
+                           "for pool '%s' when syncing out the even labels "
+                           "of dirty vdevs", error, spa_name(spa));
+               }
+               goto retry;
+       }
 
        /*
         * Sync the uberblocks to all vdevs in svd[].
@@ -1089,8 +1709,16 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
         *      been successfully committed) will be valid with respect
         *      to the new uberblocks.
         */
-       if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0)
-               return (error);
+       if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
+               if ((flags & ZIO_FLAG_TRYHARD) != 0) {
+                       zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
+                           "%d for pool '%s'", error, spa_name(spa));
+               }
+               goto retry;
+       }
+
+       if (spa_multihost(spa))
+               mmp_update_uberblock(spa, ub);
 
        /*
         * Sync out odd labels for every dirty vdev.  If the system dies
@@ -1102,5 +1730,14 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
         * to disk to ensure that all odd-label updates are committed to
         * stable storage before the next transaction group begins.
         */
-       return (vdev_label_sync_list(spa, 1, txg, flags));
+       if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
+               if ((flags & ZIO_FLAG_TRYHARD) != 0) {
+                       zfs_dbgmsg("vdev_label_sync_list() returned error %d "
+                           "for pool '%s' when syncing out the odd labels of "
+                           "dirty vdevs", error, spa_name(spa));
+               }
+               goto retry;
+       }
+
+       return (0);
 }
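/*
 * Sync-context sketch (assumed caller, similar to what spa_sync() does):
 * hand the top-level vdevs to vdev_config_sync() and let it retry with
 * ZIO_FLAG_TRYHARD internally before giving up.
 */
static int
example_sync_configs(spa_t *spa, uint64_t txg)
{
        vdev_t *rvd = spa->spa_root_vdev;

        /* simplest case: pass every top-level vdev */
        return (vdev_config_sync(rvd->vdev_child, (int)rvd->vdev_children,
            txg));
}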