Get rid of space_map_update() for ms_synced_length
module/zfs/space_map.c
index d84dd7583592e630fdc0b5f3ede7a2b225351b22..5cf3feaae108672fb8f2e05fc21965d27fce8fd7 100644
@@ -23,7 +23,7 @@
  * Use is subject to license terms.
  */
 /*
- * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
 #include <sys/zfeature.h>
 
 /*
+ * Note on space map block size:
+ *
  * The data for a given space map can be kept on blocks of any size.
- * Larger blocks entail fewer i/o operations, but they also cause the
- * DMU to keep more data in-core, and also to waste more i/o bandwidth
+ * Larger blocks entail fewer I/O operations, but they also cause the
+ * DMU to keep more data in-core, and also to waste more I/O bandwidth
  * when only a few blocks have changed since the last transaction group.
  */
-int space_map_blksz = (1 << 12);
+
+/*
+ * Enabled whenever we want to stress test the use of double-word
+ * space map entries.
+ */
+boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;
+
+/*
+ * Override the default indirect block size of 128K and instead use 16K for
+ * space maps (2^14 bytes).  This dramatically reduces write inflation, since
+ * appending to a space map typically has to write one data block (4KB) and
+ * one or two indirect blocks (16K-32K, rather than 128K).
+ */
+int space_map_ibs = 14;
+
+boolean_t
+sm_entry_is_debug(uint64_t e)
+{
+       return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
+}
+
+boolean_t
+sm_entry_is_single_word(uint64_t e)
+{
+       uint8_t prefix = SM_PREFIX_DECODE(e);
+       return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
+}
+
+boolean_t
+sm_entry_is_double_word(uint64_t e)
+{
+       return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
+}
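
For illustration, a minimal sketch of how these predicates combine when walking a raw word stream; the helper name and the counting logic are hypothetical, the real consumers being space_map_iterate() and space_map_incremental_destroy() below.

static uint64_t
sm_sketch_count_entries(const uint64_t *words, uint64_t nwords)
{
        uint64_t entries = 0;

        for (uint64_t i = 0; i < nwords; i++) {
                if (sm_entry_is_debug(words[i]))
                        continue;       /* debug words carry no segment */
                if (sm_entry_is_double_word(words[i]))
                        i++;            /* consume the entry's second word */
                entries++;
        }
        return (entries);
}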
 
 /*
  * Iterate through the space map, invoking the callback on each (non-debug)
- * space map entry.
+ * space map entry. Stop after reading 'end' bytes of the space map.
  */
 int
-space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
+space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
 {
-       uint64_t *entry, *entry_map, *entry_map_end;
-       uint64_t bufsize, size, offset, end;
+       uint64_t blksz = sm->sm_blksz;
+
+       ASSERT3U(blksz, !=, 0);
+       ASSERT3U(end, <=, space_map_length(sm));
+       ASSERT0(P2PHASE(end, sizeof (uint64_t)));
+
+       dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
+           ZIO_PRIORITY_SYNC_READ);
+
        int error = 0;
+       for (uint64_t block_base = 0; block_base < end && error == 0;
+           block_base += blksz) {
+               dmu_buf_t *db;
+               error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
+                   block_base, FTAG, &db, DMU_READ_PREFETCH);
+               if (error != 0)
+                       return (error);
+
+               uint64_t *block_start = db->db_data;
+               uint64_t block_length = MIN(end - block_base, blksz);
+               uint64_t *block_end = block_start +
+                   (block_length / sizeof (uint64_t));
+
+               VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
+               VERIFY3U(block_length, !=, 0);
+               ASSERT3U(blksz, ==, db->db_size);
+
+               for (uint64_t *block_cursor = block_start;
+                   block_cursor < block_end && error == 0; block_cursor++) {
+                       uint64_t e = *block_cursor;
+
+                       if (sm_entry_is_debug(e)) /* Skip debug entries */
+                               continue;
+
+                       uint64_t raw_offset, raw_run, vdev_id;
+                       maptype_t type;
+                       if (sm_entry_is_single_word(e)) {
+                               type = SM_TYPE_DECODE(e);
+                               vdev_id = SM_NO_VDEVID;
+                               raw_offset = SM_OFFSET_DECODE(e);
+                               raw_run = SM_RUN_DECODE(e);
+                       } else {
+                               /* it is a two-word entry */
+                               ASSERT(sm_entry_is_double_word(e));
+                               raw_run = SM2_RUN_DECODE(e);
+                               vdev_id = SM2_VDEV_DECODE(e);
+
+                               /* move on to the second word */
+                               block_cursor++;
+                               e = *block_cursor;
+                               VERIFY3P(block_cursor, <=, block_end);
+
+                               type = SM2_TYPE_DECODE(e);
+                               raw_offset = SM2_OFFSET_DECODE(e);
+                       }
 
-       end = space_map_length(sm);
+                       uint64_t entry_offset = (raw_offset << sm->sm_shift) +
+                           sm->sm_start;
+                       uint64_t entry_run = raw_run << sm->sm_shift;
+
+                       VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
+                       VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
+                       ASSERT3U(entry_offset, >=, sm->sm_start);
+                       ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
+                       ASSERT3U(entry_run, <=, sm->sm_size);
+                       ASSERT3U(entry_offset + entry_run, <=,
+                           sm->sm_start + sm->sm_size);
+
+                       space_map_entry_t sme = {
+                           .sme_type = type,
+                           .sme_vdev = vdev_id,
+                           .sme_offset = entry_offset,
+                           .sme_run = entry_run
+                       };
+                       error = callback(&sme, arg);
+               }
+               dmu_buf_rele(db, FTAG);
+       }
+       return (error);
+}
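
A usage sketch for the new signature (the callback and accumulator names are hypothetical): passing space_map_length(sm) as 'end' covers everything synced so far, while a smaller value replays only a prefix of the on-disk log.

static int
sm_sketch_sum_alloc(space_map_entry_t *sme, void *arg)
{
        uint64_t *alloc_bytes = arg;

        if (sme->sme_type == SM_ALLOC)
                *alloc_bytes += sme->sme_run;
        return (0);             /* a non-zero return stops the iteration */
}

/*
 * uint64_t alloc_bytes = 0;
 * int err = space_map_iterate(sm, space_map_length(sm),
 *     sm_sketch_sum_alloc, &alloc_bytes);
 */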
 
-       bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
-       entry_map = vmem_alloc(bufsize, KM_SLEEP);
+/*
+ * Reads the entries from the last block of the space map into
+ * buf in reverse order. Populates nwords with the number of
+ * words in the last block.
+ *
+ * Refer to block comment within space_map_incremental_destroy()
+ * to understand why this function is needed.
+ */
+static int
+space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
+    uint64_t bufsz, uint64_t *nwords)
+{
+       int error = 0;
+       dmu_buf_t *db;
 
-       if (end > bufsize) {
-               dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
-                   end - bufsize, ZIO_PRIORITY_SYNC_READ);
+       /*
+        * Find the offset of the last word in the space map and use
+        * that to read the last block of the space map with
+        * dmu_buf_hold().
+        */
+       uint64_t last_word_offset =
+           sm->sm_phys->smp_length - sizeof (uint64_t);
+       error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
+           FTAG, &db, DMU_READ_NO_PREFETCH);
+       if (error != 0)
+               return (error);
+
+       ASSERT3U(sm->sm_object, ==, db->db_object);
+       ASSERT3U(sm->sm_blksz, ==, db->db_size);
+       ASSERT3U(bufsz, >=, db->db_size);
+       ASSERT(nwords != NULL);
+
+       uint64_t *words = db->db_data;
+       *nwords =
+           (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);
+
+       ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));
+
+       uint64_t n = *nwords;
+       uint64_t j = n - 1;
+       for (uint64_t i = 0; i < n; i++) {
+               uint64_t entry = words[i];
+               if (sm_entry_is_double_word(entry)) {
+                       /*
+                        * Since we are populating the buffer backwards
+                        * we have to be extra careful and add the two
+                        * words of the double-word entry in the right
+                        * order.
+                        */
+                       ASSERT3U(j, >, 0);
+                       buf[j - 1] = entry;
+
+                       i++;
+                       ASSERT3U(i, <, n);
+                       entry = words[i];
+                       buf[j] = entry;
+                       j -= 2;
+               } else {
+                       ASSERT(sm_entry_is_debug(entry) ||
+                           sm_entry_is_single_word(entry));
+                       buf[j] = entry;
+                       j--;
+               }
        }
 
-       for (offset = 0; offset < end && error == 0; offset += bufsize) {
-               size = MIN(end - offset, bufsize);
-               VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
-               VERIFY(size != 0);
-               ASSERT3U(sm->sm_blksz, !=, 0);
+       /*
+        * Assert that we wrote backwards all the
+        * way to the beginning of the buffer.
+        */
+       ASSERT3S(j, ==, -1);
+
+       dmu_buf_rele(db, FTAG);
+       return (error);
+}
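
A worked example of the reversal above, using illustrative word values: if the last block holds the words w0 [w1 w2] w3, where w1/w2 form one double-word entry and w0 and w3 are single-word entries, the buffer comes out as w3 [w1 w2] w0. Entries are emitted last-to-first, but the two words of the double-word entry keep their on-disk order so the forward scan in space_map_incremental_destroy() can still decode them.

/*
 *      on disk (last block):   w0   [w1 w2]   w3
 *      reversed buffer:        w3   [w1 w2]   w0
 */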
+
+/*
+ * Note: This function performs destructive actions - specifically
+ * it deletes entries from the end of the space map. Thus, callers
+ * should ensure that they are holding the appropriate locks for
+ * the space map that they provide.
+ */
+int
+space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
+    dmu_tx_t *tx)
+{
+       uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
+       uint64_t *buf = zio_buf_alloc(bufsz);
 
-               dprintf("object=%llu  offset=%llx  size=%llx\n",
-                   space_map_object(sm), offset, size);
+       dmu_buf_will_dirty(sm->sm_dbuf, tx);
 
-               error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
-                   entry_map, DMU_READ_PREFETCH);
+       /*
+        * Ideally we would want to iterate from the beginning of the
+        * space map to the end in incremental steps. The issue with this
+        * approach is that we don't have any field on-disk that points
+        * us where to start between each step. We could try zeroing out
+        * entries that we've destroyed, but this doesn't work either as
+        * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
+        *
+        * As a result, we destroy the space map's entries incrementally,
+        * starting from the end, after applying the callback to each of them.
+        *
+        * The problem with this approach is that we cannot literally
+        * iterate through the words in the space map backwards as we
+        * can't distinguish two-word space map entries from their second
+        * word. Thus we do the following:
+        *
+        * 1] We get all the entries from the last block of the space map
+        *    and put them into a buffer in reverse order. This way the
+        *    last entry comes first in the buffer, the second to last is
+        *    second, etc.
+        * 2] We iterate through the entries in the buffer and apply
+        *    the callback to each one. As we move from entry to entry
+        *    we decrease the size of the space map, effectively deleting
+        *    each entry.
+        * 3] If there are no more entries in the space map or the callback
+        *    returns a value other than 0, we stop iterating over the
+        *    space map. If there are entries remaining and the callback
+        *    returned 0, we go back to step [1].
+        */
+       int error = 0;
+       while (space_map_length(sm) > 0 && error == 0) {
+               uint64_t nwords = 0;
+               error = space_map_reversed_last_block_entries(sm, buf, bufsz,
+                   &nwords);
                if (error != 0)
                        break;
 
-               entry_map_end = entry_map + (size / sizeof (uint64_t));
-               for (entry = entry_map; entry < entry_map_end && error == 0;
-                   entry++) {
-                       uint64_t e = *entry;
-                       uint64_t offset, size;
+               ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));
 
-                       if (SM_DEBUG_DECODE(e)) /* Skip debug entries */
+               for (uint64_t i = 0; i < nwords; i++) {
+                       uint64_t e = buf[i];
+
+                       if (sm_entry_is_debug(e)) {
+                               sm->sm_phys->smp_length -= sizeof (uint64_t);
                                continue;
+                       }
 
-                       offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
-                           sm->sm_start;
-                       size = SM_RUN_DECODE(e) << sm->sm_shift;
+                       int words = 1;
+                       uint64_t raw_offset, raw_run, vdev_id;
+                       maptype_t type;
+                       if (sm_entry_is_single_word(e)) {
+                               type = SM_TYPE_DECODE(e);
+                               vdev_id = SM_NO_VDEVID;
+                               raw_offset = SM_OFFSET_DECODE(e);
+                               raw_run = SM_RUN_DECODE(e);
+                       } else {
+                               ASSERT(sm_entry_is_double_word(e));
+                               words = 2;
+
+                               raw_run = SM2_RUN_DECODE(e);
+                               vdev_id = SM2_VDEV_DECODE(e);
+
+                               /* move to the second word */
+                               i++;
+                               e = buf[i];
+
+                               ASSERT3P(i, <=, nwords);
+
+                               type = SM2_TYPE_DECODE(e);
+                               raw_offset = SM2_OFFSET_DECODE(e);
+                       }
 
-                       VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
-                       VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
-                       VERIFY3U(offset, >=, sm->sm_start);
-                       VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
-                       error = callback(SM_TYPE_DECODE(e), offset, size, arg);
+                       uint64_t entry_offset =
+                           (raw_offset << sm->sm_shift) + sm->sm_start;
+                       uint64_t entry_run = raw_run << sm->sm_shift;
+
+                       VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
+                       VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
+                       VERIFY3U(entry_offset, >=, sm->sm_start);
+                       VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
+                       VERIFY3U(entry_run, <=, sm->sm_size);
+                       VERIFY3U(entry_offset + entry_run, <=,
+                           sm->sm_start + sm->sm_size);
+
+                       space_map_entry_t sme = {
+                           .sme_type = type,
+                           .sme_vdev = vdev_id,
+                           .sme_offset = entry_offset,
+                           .sme_run = entry_run
+                       };
+                       error = callback(&sme, arg);
+                       if (error != 0)
+                               break;
+
+                       if (type == SM_ALLOC)
+                               sm->sm_phys->smp_alloc -= entry_run;
+                       else
+                               sm->sm_phys->smp_alloc += entry_run;
+                       sm->sm_phys->smp_length -= words * sizeof (uint64_t);
                }
        }
 
-       vmem_free(entry_map, bufsize);
+       if (space_map_length(sm) == 0) {
+               ASSERT0(error);
+               ASSERT0(space_map_allocated(sm));
+       }
+
+       zio_buf_free(buf, bufsz);
        return (error);
 }
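
A usage sketch (the callback and its accounting are hypothetical): each entry is handed to the callback before smp_length and smp_alloc are rolled back, so a caller can, for example, total the space it is giving back.

static int
sm_sketch_destroy_cb(space_map_entry_t *sme, void *arg)
{
        uint64_t *bytes_destroyed = arg;

        *bytes_destroyed += sme->sme_run;
        return (0);             /* a non-zero return aborts the destroy */
}

/*
 * uint64_t bytes = 0;
 * int err = space_map_incremental_destroy(sm, sm_sketch_destroy_cb,
 *     &bytes, tx);
 */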
 
@@ -112,54 +375,57 @@ typedef struct space_map_load_arg {
 } space_map_load_arg_t;
 
 static int
-space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
-    void *arg)
+space_map_load_callback(space_map_entry_t *sme, void *arg)
 {
        space_map_load_arg_t *smla = arg;
-       if (type == smla->smla_type) {
-               VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
+       if (sme->sme_type == smla->smla_type) {
+               VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
                    smla->smla_sm->sm_size);
-               range_tree_add(smla->smla_rt, offset, size);
+               range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
        } else {
-               range_tree_remove(smla->smla_rt, offset, size);
+               range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
        }
 
        return (0);
 }
 
 /*
- * Load the space map disk into the specified range tree. Segments of maptype
- * are added to the range tree, other segment types are removed.
+ * Load the space map into the range tree, like space_map_load, but only
+ * read the first 'length' bytes of the space map.
  */
 int
-space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
+space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+    uint64_t length)
 {
-       uint64_t space;
-       int err;
        space_map_load_arg_t smla;
 
        VERIFY0(range_tree_space(rt));
-       space = space_map_allocated(sm);
 
-       if (maptype == SM_FREE) {
+       if (maptype == SM_FREE)
                range_tree_add(rt, sm->sm_start, sm->sm_size);
-               space = sm->sm_size - space;
-       }
 
        smla.smla_rt = rt;
        smla.smla_sm = sm;
        smla.smla_type = maptype;
-       err = space_map_iterate(sm, space_map_load_callback, &smla);
+       int err = space_map_iterate(sm, length,
+           space_map_load_callback, &smla);
 
-       if (err == 0) {
-               VERIFY3U(range_tree_space(rt), ==, space);
-       } else {
+       if (err != 0)
                range_tree_vacate(rt, NULL, NULL);
-       }
 
        return (err);
 }
 
+/*
+ * Load the space map from disk into the specified range tree. Segments of
+ * maptype are added to the range tree; other segment types are removed.
+ */
+int
+space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
+{
+       return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
+}
+
 void
 space_map_histogram_clear(space_map_t *sm)
 {
@@ -233,43 +499,237 @@ space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
        }
 }
 
-uint64_t
-space_map_entries(space_map_t *sm, range_tree_t *rt)
+static void
+space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
 {
-       avl_tree_t *t = &rt->rt_root;
-       range_seg_t *rs;
-       uint64_t size, entries;
+       dmu_buf_will_dirty(sm->sm_dbuf, tx);
+
+       uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
+           SM_DEBUG_ACTION_ENCODE(maptype) |
+           SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
+           SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
+
+       dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
+           sizeof (dentry), &dentry, tx);
+
+       sm->sm_phys->smp_length += sizeof (dentry);
+}
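
For illustration, the matching decode of such a debug entry, assuming the SM_DEBUG_*_DECODE macros from <sys/space_map.h> (the local variable names are hypothetical):

/*
 * uint64_t e = dentry;
 * maptype_t action   = SM_DEBUG_ACTION_DECODE(e);
 * uint64_t  syncpass = SM_DEBUG_SYNCPASS_DECODE(e);
 * uint64_t  txg      = SM_DEBUG_TXG_DECODE(e);
 */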
+
+/*
+ * Writes one or more entries given a segment.
+ *
+ * Note: The function may release the dbuf from the pointer initially
+ * passed to it, and return a different dbuf. Also, the space map's
+ * dbuf must be dirty for the changes in sm_phys to take effect.
+ */
+static void
+space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
+    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
+{
+       ASSERT3U(words, !=, 0);
+       ASSERT3U(words, <=, 2);
+
+       /* ensure the vdev_id can be represented by the space map */
+       ASSERT3U(vdev_id, <=, SM_NO_VDEVID);
+
+       /*
+        * if this is a single word entry, ensure that no vdev was
+        * specified.
+        */
+       IMPLY(words == 1, vdev_id == SM_NO_VDEVID);
+
+       dmu_buf_t *db = *dbp;
+       ASSERT3U(db->db_size, ==, sm->sm_blksz);
+
+       uint64_t *block_base = db->db_data;
+       uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
+       uint64_t *block_cursor = block_base +
+           (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);
+
+       ASSERT3P(block_cursor, <=, block_end);
+
+       uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
+       uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
+       uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;
+
+       ASSERT3U(rs->rs_start, >=, sm->sm_start);
+       ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
+       ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
+       ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);
+
+       while (size != 0) {
+               ASSERT3P(block_cursor, <=, block_end);
+
+               /*
+                * If we are at the end of this block, flush it and start
+                * writing again from the beginning.
+                */
+               if (block_cursor == block_end) {
+                       dmu_buf_rele(db, tag);
+
+                       uint64_t next_word_offset = sm->sm_phys->smp_length;
+                       VERIFY0(dmu_buf_hold(sm->sm_os,
+                           space_map_object(sm), next_word_offset,
+                           tag, &db, DMU_READ_PREFETCH));
+                       dmu_buf_will_dirty(db, tx);
+
+                       /* update caller's dbuf */
+                       *dbp = db;
+
+                       ASSERT3U(db->db_size, ==, sm->sm_blksz);
 
+                       block_base = db->db_data;
+                       block_cursor = block_base;
+                       block_end = block_base +
+                           (db->db_size / sizeof (uint64_t));
+               }
+
+               /*
+                * If we are writing a two-word entry and we only have one
+                * word left on this block, just pad it with an empty debug
+                * entry and write the two-word entry in the next block.
+                */
+               uint64_t *next_entry = block_cursor + 1;
+               if (next_entry == block_end && words > 1) {
+                       ASSERT3U(words, ==, 2);
+                       *block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
+                           SM_DEBUG_ACTION_ENCODE(0) |
+                           SM_DEBUG_SYNCPASS_ENCODE(0) |
+                           SM_DEBUG_TXG_ENCODE(0);
+                       block_cursor++;
+                       sm->sm_phys->smp_length += sizeof (uint64_t);
+                       ASSERT3P(block_cursor, ==, block_end);
+                       continue;
+               }
+
+               uint64_t run_len = MIN(size, run_max);
+               switch (words) {
+               case 1:
+                       *block_cursor = SM_OFFSET_ENCODE(start) |
+                           SM_TYPE_ENCODE(maptype) |
+                           SM_RUN_ENCODE(run_len);
+                       block_cursor++;
+                       break;
+               case 2:
+                       /* write the first word of the entry */
+                       *block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
+                           SM2_RUN_ENCODE(run_len) |
+                           SM2_VDEV_ENCODE(vdev_id);
+                       block_cursor++;
+
+                       /* move on to the second word of the entry */
+                       ASSERT3P(block_cursor, <, block_end);
+                       *block_cursor = SM2_TYPE_ENCODE(maptype) |
+                           SM2_OFFSET_ENCODE(start);
+                       block_cursor++;
+                       break;
+               default:
+                       panic("%d-word space map entries are not supported",
+                           words);
+                       break;
+               }
+               sm->sm_phys->smp_length += words * sizeof (uint64_t);
+
+               start += run_len;
+               size -= run_len;
+       }
+       ASSERT0(size);
+
+}
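
A worked instance of the padding case above, with an illustrative 4K block (512 words): if 511 words are already written and the next entry needs two words, word 511 is filled with a debug entry whose action, syncpass, and txg fields are zero, and the two-word entry starts at word 0 of the next block.

/*
 *      block N:    ... entries ... | zeroed debug pad      <- word 511
 *      block N+1:  SM2 word 1 | SM2 word 2 | ...
 */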
+
+/*
+ * Note: The space map's dbuf must be dirty for the changes in sm_phys to
+ * take effect.
+ */
+static void
+space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+    uint64_t vdev_id, dmu_tx_t *tx)
+{
+       spa_t *spa = tx->tx_pool->dp_spa;
+       dmu_buf_t *db;
+
+       space_map_write_intro_debug(sm, maptype, tx);
+
+#ifdef DEBUG
        /*
-        * All space_maps always have a debug entry so account for it here.
+        * We do this right after we write the intro debug entry
+        * because the estimate does not take it into account.
         */
-       entries = 1;
+       uint64_t initial_objsize = sm->sm_phys->smp_length;
+       uint64_t estimated_growth =
+           space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
+       uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
+#endif
 
        /*
-        * Traverse the range tree and calculate the number of space map
-        * entries that would be required to write out the range tree.
+        * Find the offset right after the last word in the space map
+        * and use that to get a hold of the last block, so we can
+        * start appending to it.
         */
-       for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
-               size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
-               entries += howmany(size, SM_RUN_MAX);
+       uint64_t next_word_offset = sm->sm_phys->smp_length;
+       VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
+           next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
+       ASSERT3U(db->db_size, ==, sm->sm_blksz);
+
+       dmu_buf_will_dirty(db, tx);
+
+       avl_tree_t *t = &rt->rt_root;
+       for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
+               uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
+               uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
+               uint8_t words = 1;
+
+               /*
+                * We only write two-word entries when both of the following
+                * are true:
+                *
+                * [1] The feature is enabled.
+                * [2] The offset or run is too big for a single-word entry,
+                *      or the vdev_id is set (meaning not equal to
+                *      SM_NO_VDEVID).
+                *
+                * Note that for purposes of testing we've added the case that
+                * we write two-word entries occasionally when the feature is
+                * enabled and zfs_force_some_double_word_sm_entries has been
+                * set.
+                */
+               if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
+                   (offset >= (1ULL << SM_OFFSET_BITS) ||
+                   length > SM_RUN_MAX ||
+                   vdev_id != SM_NO_VDEVID ||
+                   (zfs_force_some_double_word_sm_entries &&
+                   spa_get_random(100) == 0)))
+                       words = 2;
+
+               space_map_write_seg(sm, rs, maptype, vdev_id, words,
+                   &db, FTAG, tx);
        }
-       return (entries);
+
+       dmu_buf_rele(db, FTAG);
+
+#ifdef DEBUG
+       /*
+        * We expect our estimation to be based on the worst case
+        * scenario [see comment in space_map_estimate_optimal_size()].
+        * Therefore we expect the actual objsize to be equal to or less
+        * than whatever we estimated it to be.
+        */
+       ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
+#endif
 }
 
+/*
+ * Note: This function manipulates the state of the given space map but
+ * does not hold any locks implicitly. Thus the caller is responsible
+ * for synchronizing writes to the space map.
+ */
 void
 space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
-    dmu_tx_t *tx)
+    uint64_t vdev_id, dmu_tx_t *tx)
 {
-       objset_t *os = sm->sm_os;
-       spa_t *spa = dmu_objset_spa(os);
-       avl_tree_t *t = &rt->rt_root;
-       range_seg_t *rs;
-       uint64_t size, total, rt_space, nodes;
-       uint64_t *entry, *entry_map, *entry_map_end;
-       uint64_t expected_entries, actual_entries = 1;
-
-       ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
+       ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os)));
        VERIFY3U(space_map_object(sm), !=, 0);
+
        dmu_buf_will_dirty(sm->sm_dbuf, tx);
 
        /*
@@ -279,7 +739,7 @@ space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
         */
        sm->sm_phys->smp_object = sm->sm_object;
 
-       if (range_tree_space(rt) == 0) {
+       if (range_tree_is_empty(rt)) {
                VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
                return;
        }
@@ -289,58 +749,10 @@ space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
        else
                sm->sm_phys->smp_alloc -= range_tree_space(rt);
 
-       expected_entries = space_map_entries(sm, rt);
+       uint64_t nodes = avl_numnodes(&rt->rt_root);
+       uint64_t rt_space = range_tree_space(rt);
 
-       entry_map = vmem_alloc(sm->sm_blksz, KM_SLEEP);
-       entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
-       entry = entry_map;
-
-       *entry++ = SM_DEBUG_ENCODE(1) |
-           SM_DEBUG_ACTION_ENCODE(maptype) |
-           SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
-           SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
-
-       total = 0;
-       nodes = avl_numnodes(&rt->rt_root);
-       rt_space = range_tree_space(rt);
-       for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
-               uint64_t start;
-
-               size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
-               start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
-
-               total += size << sm->sm_shift;
-
-               while (size != 0) {
-                       uint64_t run_len;
-
-                       run_len = MIN(size, SM_RUN_MAX);
-
-                       if (entry == entry_map_end) {
-                               dmu_write(os, space_map_object(sm),
-                                   sm->sm_phys->smp_objsize, sm->sm_blksz,
-                                   entry_map, tx);
-                               sm->sm_phys->smp_objsize += sm->sm_blksz;
-                               entry = entry_map;
-                       }
-
-                       *entry++ = SM_OFFSET_ENCODE(start) |
-                           SM_TYPE_ENCODE(maptype) |
-                           SM_RUN_ENCODE(run_len);
-
-                       start += run_len;
-                       size -= run_len;
-                       actual_entries++;
-               }
-       }
-
-       if (entry != entry_map) {
-               size = (entry - entry_map) * sizeof (uint64_t);
-               dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
-                   size, entry_map, tx);
-               sm->sm_phys->smp_objsize += size;
-       }
-       ASSERT3U(expected_entries, ==, actual_entries);
+       space_map_write_impl(sm, rt, maptype, vdev_id, tx);
 
        /*
         * Ensure that the space_map's accounting wasn't changed
@@ -348,9 +760,6 @@ space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
         */
        VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
        VERIFY3U(range_tree_space(rt), ==, rt_space);
-       VERIFY3U(range_tree_space(rt), ==, total);
-
-       vmem_free(entry_map, sm->sm_blksz);
 }
 
 static int
@@ -386,8 +795,6 @@ space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
        sm->sm_shift = shift;
        sm->sm_os = os;
        sm->sm_object = object;
-       sm->sm_length = 0;
-       sm->sm_alloc = 0;
        sm->sm_blksz = 0;
        sm->sm_dbuf = NULL;
        sm->sm_phys = NULL;
@@ -397,7 +804,6 @@ space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
                space_map_close(sm);
                return (error);
        }
-
        *smp = sm;
 
        return (0);
@@ -418,7 +824,7 @@ space_map_close(space_map_t *sm)
 }
 
 void
-space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
+space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
 {
        objset_t *os = sm->sm_os;
        spa_t *spa = dmu_objset_spa(os);
@@ -440,7 +846,8 @@ space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
         */
        if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
            doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
-           doi.doi_data_block_size != space_map_blksz) {
+           doi.doi_data_block_size != blocksize ||
+           doi.doi_metadata_block_size != 1 << space_map_ibs) {
                zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
                    "object[%llu]: old bonus %u, old blocksz %u",
                    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
@@ -449,7 +856,7 @@ space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
                space_map_free(sm, tx);
                dmu_buf_rele(sm->sm_dbuf, sm);
 
-               sm->sm_object = space_map_alloc(sm->sm_os, tx);
+               sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
                VERIFY0(space_map_open_impl(sm));
        } else {
                VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
@@ -464,25 +871,12 @@ space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
        }
 
        dmu_buf_will_dirty(sm->sm_dbuf, tx);
-       sm->sm_phys->smp_objsize = 0;
+       sm->sm_phys->smp_length = 0;
        sm->sm_phys->smp_alloc = 0;
 }
 
-/*
- * Update the in-core space_map allocation and length values.
- */
-void
-space_map_update(space_map_t *sm)
-{
-       if (sm == NULL)
-               return;
-
-       sm->sm_alloc = sm->sm_phys->smp_alloc;
-       sm->sm_length = sm->sm_phys->smp_objsize;
-}
-
 uint64_t
-space_map_alloc(objset_t *os, dmu_tx_t *tx)
+space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
 {
        spa_t *spa = dmu_objset_spa(os);
        uint64_t object;
@@ -496,9 +890,8 @@ space_map_alloc(objset_t *os, dmu_tx_t *tx)
                bonuslen = SPACE_MAP_SIZE_V0;
        }
 
-       object = dmu_object_alloc(os,
-           DMU_OT_SPACE_MAP, space_map_blksz,
-           DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
+       object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
+           space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
 
        return (object);
 }
@@ -530,38 +923,147 @@ space_map_free(space_map_t *sm, dmu_tx_t *tx)
        sm->sm_object = 0;
 }
 
+/*
+ * Given a range tree, make a worst-case estimate of how much
+ * space the tree's segments would take if they were written to
+ * the given space map.
+ */
+uint64_t
+space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
+    uint64_t vdev_id)
+{
+       spa_t *spa = dmu_objset_spa(sm->sm_os);
+       uint64_t shift = sm->sm_shift;
+       uint64_t *histogram = rt->rt_histogram;
+       uint64_t entries_for_seg = 0;
+
+       /*
+        * In order to get a quick estimate of the optimal size that this
+        * range tree would have on-disk as a space map, we iterate through
+        * its histogram buckets instead of iterating through its nodes.
+        *
+        * Note that this is a highest-bound/worst-case estimate for the
+        * following reasons:
+        *
+        * 1] We assume that we always add a debug padding for each block
+        *    we write and we also assume that we start at the last word
+        *    of a block attempting to write a two-word entry.
+        * 2] Rounding up errors due to the way segments are distributed
+        *    in the buckets of the range tree's histogram.
+        * 3] The activation of zfs_force_some_double_word_sm_entries
+        *    (tunable) when testing.
+        *
+        * = Math and Rounding Errors =
+        *
+        * rt_histogram[i] bucket of a range tree represents the number
+        * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
+        * that, we want to divide the buckets into groups: Buckets that
+        * can be represented using a single-word entry, ones that can
+        * be represented with a double-word entry, and ones that can
+        * only be represented with multiple two-word entries.
+        *
+        * [Note that if the new encoding feature is not enabled there
+        * are only two groups: single-word entry buckets and multiple
+        * single-word entry buckets. The information below assumes
+        * two-word entries are enabled, but it can easily be applied when
+        * the feature is not enabled.]
+        *
+        * To find the highest bucket that can be represented with a
+        * single-word entry we look at the maximum run that such entry
+        * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
+        * the run of a space map entry is shifted by sm_shift, thus we
+        * add it to the exponent]. This way, excluding the value of the
+        * maximum run that can be represented by a single-word entry,
+        * all runs that are smaller exist in buckets 0 to
+        * SM_RUN_BITS + shift - 1.
+        *
+        * To find the highest bucket that can be represented with a
+        * double-word entry, we follow the same approach. Finally, any
+        * bucket higher than that are represented with multiple two-word
+        * entries. To be more specific, if the highest bucket whose
+        * segments can be represented with a single two-word entry is X,
+        * then bucket X+1 will need 2 two-word entries for each of its
+        * segments, X+2 will need 4, X+3 will need 8, ...etc.
+        *
+        * With all of the above we make our estimation based on bucket
+        * groups. There is a rounding error though. As we mentioned in
+        * the example with the one-word entry, the maximum run that can
+        * be represented in a one-word entry 2^(SM_RUN_BITS + shift) is
+        * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
+        * that length fall into the next bucket (and bucket group) where
+        * we start counting two-word entries and this is one more reason
+        * why the estimated size may end up being bigger than the actual
+        * size written.
+        */
+       uint64_t size = 0;
+       uint64_t idx = 0;
+
+       if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
+           (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {
+
+               /*
+                * If we are trying to force some double word entries just
+                * assume the worst-case of every single word entry being
+                * written as a double word entry.
+                */
+               uint64_t entry_size =
+                   (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
+                   zfs_force_some_double_word_sm_entries) ?
+                   (2 * sizeof (uint64_t)) : sizeof (uint64_t);
+
+               uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
+               for (; idx <= single_entry_max_bucket; idx++)
+                       size += histogram[idx] * entry_size;
+
+               if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
+                       for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
+                               ASSERT3U(idx, >=, single_entry_max_bucket);
+                               entries_for_seg =
+                                   1ULL << (idx - single_entry_max_bucket);
+                               size += histogram[idx] *
+                                   entries_for_seg * entry_size;
+                       }
+                       return (size);
+               }
+       }
+
+       ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));
+
+       uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
+       for (; idx <= double_entry_max_bucket; idx++)
+               size += histogram[idx] * 2 * sizeof (uint64_t);
+
+       for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
+               ASSERT3U(idx, >=, double_entry_max_bucket);
+               entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
+               size += histogram[idx] *
+                   entries_for_seg * 2 * sizeof (uint64_t);
+       }
+
+       /*
+        * Assume the worst case where we start with the padding at the end
+        * of the current block and we add an extra padding entry at the end
+        * of all subsequent blocks.
+        */
+       size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);
+
+       return (size);
+}
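
A worked instance of the bucket math above: a segment in bucket idx = double_entry_max_bucket + 3 can be up to 2^3 = 8 times longer than the largest run a single two-word entry can express, so the loop charges it 8 two-word entries, i.e. histogram[idx] * 8 * 16 bytes.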
+
 uint64_t
 space_map_object(space_map_t *sm)
 {
        return (sm != NULL ? sm->sm_object : 0);
 }
 
-/*
- * Returns the already synced, on-disk allocated space.
- */
-uint64_t
+int64_t
 space_map_allocated(space_map_t *sm)
 {
-       return (sm != NULL ? sm->sm_alloc : 0);
+       return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
 }
 
-/*
- * Returns the already synced, on-disk length;
- */
 uint64_t
 space_map_length(space_map_t *sm)
 {
-       return (sm != NULL ? sm->sm_length : 0);
-}
-
-/*
- * Returns the allocated space that is currently syncing.
- */
-int64_t
-space_map_alloc_delta(space_map_t *sm)
-{
-       if (sm == NULL)
-               return (0);
-       ASSERT(sm->sm_dbuf != NULL);
-       return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
+       return (sm != NULL ? sm->sm_phys->smp_length : 0);
 }
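
A caller-side sketch tied to the commit subject (ms_synced_length is the field named in the title; treat the exact statement as illustrative): with space_map_update() gone, a consumer such as the metaslab code records the synced length itself, e.g. right after loading:

/*
 * msp->ms_synced_length = space_map_length(msp->ms_sm);
 */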