Fix send/recv lost spill block
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index fd51d59a8edb6f3921ae1e36134beaf75dd2e57b..07e616f6f0de614e58422d45acbb94f592d57342 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -21,7 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
- * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
  */
 #include <sys/blkptr.h>
 #include <sys/range_tree.h>
 #include <sys/trace_dbuf.h>
+#include <sys/callb.h>
+#include <sys/abd.h>
+#include <sys/vdev.h>
+#include <sys/cityhash.h>
+#include <sys/spa_impl.h>
 
-struct dbuf_hold_impl_data {
+kstat_t *dbuf_ksp;
+
+typedef struct dbuf_stats {
+       /*
+        * Various statistics about the size of the dbuf cache.
+        */
+       kstat_named_t cache_count;
+       kstat_named_t cache_size_bytes;
+       kstat_named_t cache_size_bytes_max;
+       /*
+        * Statistics regarding the bounds on the dbuf cache size.
+        */
+       kstat_named_t cache_target_bytes;
+       kstat_named_t cache_lowater_bytes;
+       kstat_named_t cache_hiwater_bytes;
+       /*
+        * Total number of dbuf cache evictions that have occurred.
+        */
+       kstat_named_t cache_total_evicts;
+       /*
+        * The distribution of dbuf levels in the dbuf cache and
+        * the total size of all dbufs at each level.
+        */
+       kstat_named_t cache_levels[DN_MAX_LEVELS];
+       kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
+       /*
+        * Statistics about the dbuf hash table.
+        */
+       kstat_named_t hash_hits;
+       kstat_named_t hash_misses;
+       kstat_named_t hash_collisions;
+       kstat_named_t hash_elements;
+       kstat_named_t hash_elements_max;
+       /*
+        * Number of sublists containing more than one dbuf in the dbuf
+        * hash table. Keep track of the longest hash chain.
+        */
+       kstat_named_t hash_chains;
+       kstat_named_t hash_chain_max;
+       /*
+        * Number of times a dbuf_create() discovers that a dbuf was
+        * already created and in the dbuf hash table.
+        */
+       kstat_named_t hash_insert_race;
+       /*
+        * Statistics about the size of the metadata dbuf cache.
+        */
+       kstat_named_t metadata_cache_count;
+       kstat_named_t metadata_cache_size_bytes;
+       kstat_named_t metadata_cache_size_bytes_max;
+       /*
+        * For diagnostic purposes, this is incremented whenever we can't add
+        * something to the metadata cache because it's full, and instead put
+        * the data in the regular dbuf cache.
+        */
+       kstat_named_t metadata_cache_overflow;
+} dbuf_stats_t;
+
+dbuf_stats_t dbuf_stats = {
+       { "cache_count",                        KSTAT_DATA_UINT64 },
+       { "cache_size_bytes",                   KSTAT_DATA_UINT64 },
+       { "cache_size_bytes_max",               KSTAT_DATA_UINT64 },
+       { "cache_target_bytes",                 KSTAT_DATA_UINT64 },
+       { "cache_lowater_bytes",                KSTAT_DATA_UINT64 },
+       { "cache_hiwater_bytes",                KSTAT_DATA_UINT64 },
+       { "cache_total_evicts",                 KSTAT_DATA_UINT64 },
+       { { "cache_levels_N",                   KSTAT_DATA_UINT64 } },
+       { { "cache_levels_bytes_N",             KSTAT_DATA_UINT64 } },
+       { "hash_hits",                          KSTAT_DATA_UINT64 },
+       { "hash_misses",                        KSTAT_DATA_UINT64 },
+       { "hash_collisions",                    KSTAT_DATA_UINT64 },
+       { "hash_elements",                      KSTAT_DATA_UINT64 },
+       { "hash_elements_max",                  KSTAT_DATA_UINT64 },
+       { "hash_chains",                        KSTAT_DATA_UINT64 },
+       { "hash_chain_max",                     KSTAT_DATA_UINT64 },
+       { "hash_insert_race",                   KSTAT_DATA_UINT64 },
+       { "metadata_cache_count",               KSTAT_DATA_UINT64 },
+       { "metadata_cache_size_bytes",          KSTAT_DATA_UINT64 },
+       { "metadata_cache_size_bytes_max",      KSTAT_DATA_UINT64 },
+       { "metadata_cache_overflow",            KSTAT_DATA_UINT64 }
+};
+
+#define        DBUF_STAT_INCR(stat, val)       \
+       atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
+#define        DBUF_STAT_DECR(stat, val)       \
+       DBUF_STAT_INCR(stat, -(val));
+#define        DBUF_STAT_BUMP(stat)            \
+       DBUF_STAT_INCR(stat, 1);
+#define        DBUF_STAT_BUMPDOWN(stat)        \
+       DBUF_STAT_INCR(stat, -1);
+#define        DBUF_STAT_MAX(stat, v) {                                        \
+       uint64_t _m;                                                    \
+       while ((v) > (_m = dbuf_stats.stat.value.ui64) &&               \
+           (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
+               continue;                                               \
+}
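
/*
 * Illustrative sketch (not part of the patch): DBUF_STAT_MAX above is a
 * lock-free "store maximum" built on compare-and-swap.  The loop re-reads
 * the current value and retries only while the observed value is still
 * smaller than v and another thread's CAS got there first; it exits without
 * writing once the stored value is already >= v.  An equivalent standalone
 * helper, assuming only atomic_cas_64(), might look like this:
 */
static inline void
atomic_max_64(volatile uint64_t *target, uint64_t v)	/* hypothetical */
{
	uint64_t m;

	/* Retry until the stored value is at least v, or our CAS wins. */
	while (v > (m = *target) &&
	    m != atomic_cas_64(target, m, v))
		continue;
}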
+
+typedef struct dbuf_hold_arg {
        /* Function arguments */
        dnode_t *dh_dn;
        uint8_t dh_level;
@@ -61,37 +163,122 @@ struct dbuf_hold_impl_data {
        blkptr_t *dh_bp;
        int dh_err;
        dbuf_dirty_record_t *dh_dr;
-       arc_buf_contents_t dh_type;
-       int dh_depth;
-};
+} dbuf_hold_arg_t;
 
-static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
-    dnode_t *dn, uint8_t level, uint64_t blkid, boolean_t fail_sparse,
-       boolean_t fail_uncached,
-       void *tag, dmu_buf_impl_t **dbp, int depth);
-static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
+static dbuf_hold_arg_t *dbuf_hold_arg_create(dnode_t *dn, uint8_t level,
+       uint64_t blkid, boolean_t fail_sparse, boolean_t fail_uncached,
+       void *tag, dmu_buf_impl_t **dbp);
+static int dbuf_hold_impl_arg(dbuf_hold_arg_t *dh);
+static void dbuf_hold_arg_destroy(dbuf_hold_arg_t *dh);
 
-/*
- * Number of times that zfs_free_range() took the slow path while doing
- * a zfs receive.  A nonzero value indicates a potential performance problem.
- */
-uint64_t zfs_free_range_recv_miss;
-
-static void dbuf_destroy(dmu_buf_impl_t *db);
 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
 
-#ifndef __lint
 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
-    dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
-#endif /* ! __lint */
+    dmu_buf_evict_func_t *evict_func_sync,
+    dmu_buf_evict_func_t *evict_func_async,
+    dmu_buf_t **clear_on_evict_dbufp);
 
 /*
  * Global data structures and functions for the dbuf cache.
  */
-static kmem_cache_t *dbuf_cache;
+static kmem_cache_t *dbuf_kmem_cache;
 static taskq_t *dbu_evict_taskq;
 
+static kthread_t *dbuf_cache_evict_thread;
+static kmutex_t dbuf_evict_lock;
+static kcondvar_t dbuf_evict_cv;
+static boolean_t dbuf_evict_thread_exit;
+
+/*
+ * There are two dbuf caches; each dbuf can only be in one of them at a time.
+ *
+ * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
+ *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
+ *    that represent the metadata that describes filesystems/snapshots/
+ *    bookmarks/properties/etc. We only evict from this cache when we export a
+ *    pool, to short-circuit as much I/O as possible for all administrative
+ *    commands that need the metadata. There is no eviction policy for this
+ *    cache, because we try to only include types in it which would occupy a
+ *    very small amount of space per object but have a large impact on the
+ *    performance of these commands. Instead, after it reaches a maximum size
+ *    (which should only happen on very small memory systems with a very large
+ *    number of filesystem objects), we stop taking new dbufs into the
+ *    metadata cache, instead putting them in the normal dbuf cache.
+ *
+ * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
+ *    are not currently held but have been recently released. These dbufs
+ *    are not eligible for arc eviction until they are aged out of the cache.
+ *    Dbufs that are aged out of the cache will be immediately destroyed and
+ *    become eligible for arc eviction.
+ *
+ * Dbufs are added to these caches once the last hold is released. If a dbuf is
+ * later accessed and still exists in the dbuf cache, then it will be removed
+ * from the cache and later re-added to the head of the cache.
+ *
+ * If a given dbuf meets the requirements for the metadata cache, it will go
+ * there, otherwise it will be considered for the generic LRU dbuf cache. The
+ * caches and the refcounts tracking their sizes are stored in an array indexed
+ * by those caches' matching enum values (from dbuf_cached_state_t).
+ */
+typedef struct dbuf_cache {
+       multilist_t *cache;
+       zfs_refcount_t size;
+} dbuf_cache_t;
+dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
+
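/*
 * Illustrative sketch (not part of the patch), following the comment above:
 * when the last hold on a dbuf is released, it is routed into exactly one of
 * the two caches.  dbuf_include_in_metadata_cache() (added further down in
 * this patch) makes the decision; everything else goes to the LRU cache.
 */
static dbuf_cached_state_t
dbuf_pick_cache(dmu_buf_impl_t *db)	/* hypothetical helper */
{
	if (dbuf_include_in_metadata_cache(db))
		return (DB_DBUF_METADATA_CACHE);
	return (DB_DBUF_CACHE);
}
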
+/* Size limits for the caches */
+unsigned long dbuf_cache_max_bytes = 0;
+unsigned long dbuf_metadata_cache_max_bytes = 0;
+/* Set the default sizes of the caches as a log2 fraction of the ARC size */
+int dbuf_cache_shift = 5;
+int dbuf_metadata_cache_shift = 6;
+
+/*
+ * The LRU dbuf cache uses a three-stage eviction policy:
+ *     - A low water marker designates when the dbuf eviction thread
+ *     should stop evicting from the dbuf cache.
+ *     - When we reach the maximum size (aka mid water mark), we
+ *     signal the eviction thread to run.
+ *     - The high water mark indicates when the eviction thread
+ *     is unable to keep up with the incoming load and eviction must
+ *     happen in the context of the calling thread.
+ *
+ * The dbuf cache:
+ *                                                 (max size)
+ *                                      low water   mid water   hi water
+ * +----------------------------------------+----------+----------+
+ * |                                        |          |          |
+ * |                                        |          |          |
+ * |                                        |          |          |
+ * |                                        |          |          |
+ * +----------------------------------------+----------+----------+
+ *                                        stop        signal     evict
+ *                                      evicting     eviction   directly
+ *                                                    thread
+ *
+ * The high and low water marks indicate the operating range for the eviction
+ * thread. The low water mark is, by default, 90% of the total size of the
+ * cache and the high water mark is at 110% (both of these percentages can be
+ * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
+ * respectively). The eviction thread will try to ensure that the cache remains
+ * within this range by waking up every second and checking if the cache is
+ * above the low water mark. The thread can also be woken up by callers adding
+ * elements into the cache if the cache is larger than the mid water (i.e max
+ * cache size). Once the eviction thread is woken up and eviction is required,
+ * it will continue evicting buffers until it's able to reduce the cache size
+ * to the low water mark. If the cache size continues to grow and hits the high
+ * water mark, then callers adding elements to the cache will begin to evict
+ * directly from the cache until the cache is no longer above the high water
+ * mark.
+ */
+
+/*
+ * The percentage above and below the maximum cache size.
+ */
+uint_t dbuf_cache_hiwater_pct = 10;
+uint_t dbuf_cache_lowater_pct = 10;
+
 /* ARGSUSED */
 static int
 dbuf_cons(void *vdb, void *unused, int kmflag)
@@ -101,7 +288,8 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
 
        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
-       refcount_create(&db->db_holds);
+       multilist_link_init(&db->db_cache_link);
+       zfs_refcount_create(&db->db_holds);
 
        return (0);
 }
@@ -113,7 +301,8 @@ dbuf_dest(void *vdb, void *unused)
        dmu_buf_impl_t *db = vdb;
        mutex_destroy(&db->db_mtx);
        cv_destroy(&db->db_changed);
-       refcount_destroy(&db->db_holds);
+       ASSERT(!multilist_link_active(&db->db_cache_link));
+       zfs_refcount_destroy(&db->db_holds);
 }
 
 /*
@@ -123,27 +312,16 @@ static dbuf_hash_table_t dbuf_hash_table;
 
 static uint64_t dbuf_hash_count;
 
+/*
+ * We use Cityhash for this. It's fast, and has good hash properties without
+ * requiring any large static buffers.
+ */
 static uint64_t
 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
 {
-       uintptr_t osv = (uintptr_t)os;
-       uint64_t crc = -1ULL;
-
-       ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
-       crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
-       crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
-       crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
-       crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
-       crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
-       crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
-
-       crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
-
-       return (crc);
+       return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
 }
 
-#define        DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid);
-
 #define        DBUF_EQUAL(dbuf, os, obj, level, blkid)         \
        ((dbuf)->db.db_object == (obj) &&               \
        (dbuf)->db_objset == (os) &&                    \
@@ -158,7 +336,7 @@ dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
        uint64_t idx;
        dmu_buf_impl_t *db;
 
-       hv = DBUF_HASH(os, obj, level, blkid);
+       hv = dbuf_hash(os, obj, level, blkid);
        idx = hv & h->hash_table_mask;
 
        mutex_enter(DBUF_HASH_MUTEX(h, idx));
@@ -209,13 +387,15 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
        int level = db->db_level;
        uint64_t blkid, hv, idx;
        dmu_buf_impl_t *dbf;
+       uint32_t i;
 
        blkid = db->db_blkid;
-       hv = DBUF_HASH(os, obj, level, blkid);
+       hv = dbuf_hash(os, obj, level, blkid);
        idx = hv & h->hash_table_mask;
 
        mutex_enter(DBUF_HASH_MUTEX(h, idx));
-       for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
+       for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
+           dbf = dbf->db_hash_next, i++) {
                if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
                        mutex_enter(&dbf->db_mtx);
                        if (dbf->db_state != DB_EVICTING) {
@@ -226,15 +406,58 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
                }
        }
 
+       if (i > 0) {
+               DBUF_STAT_BUMP(hash_collisions);
+               if (i == 1)
+                       DBUF_STAT_BUMP(hash_chains);
+
+               DBUF_STAT_MAX(hash_chain_max, i);
+       }
+
        mutex_enter(&db->db_mtx);
        db->db_hash_next = h->hash_table[idx];
        h->hash_table[idx] = db;
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_inc_64(&dbuf_hash_count);
+       DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);
 
        return (NULL);
 }
 
+/*
+ * This returns whether this dbuf should be stored in the metadata cache, which
+ * is based on whether it's from one of the dnode types that store data related
+ * to traversing dataset hierarchies.
+ */
+static boolean_t
+dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
+{
+       DB_DNODE_ENTER(db);
+       dmu_object_type_t type = DB_DNODE(db)->dn_type;
+       DB_DNODE_EXIT(db);
+
+       /* Check if this dbuf is one of the types we care about */
+       if (DMU_OT_IS_METADATA_CACHED(type)) {
+               /* If we hit this, then we set something up wrong in dmu_ot */
+               ASSERT(DMU_OT_IS_METADATA(type));
+
+               /*
+                * Sanity check for small-memory systems: don't allocate too
+                * much memory for this purpose.
+                */
+               if (zfs_refcount_count(
+                   &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
+                   dbuf_metadata_cache_max_bytes) {
+                       DBUF_STAT_BUMP(metadata_cache_overflow);
+                       return (B_FALSE);
+               }
+
+               return (B_TRUE);
+       }
+
+       return (B_FALSE);
+}
+
 /*
  * Remove an entry from the hash table.  It must be in the EVICTING state.
  */
@@ -245,15 +468,15 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
        uint64_t hv, idx;
        dmu_buf_impl_t *dbf, **dbp;
 
-       hv = DBUF_HASH(db->db_objset, db->db.db_object,
+       hv = dbuf_hash(db->db_objset, db->db.db_object,
            db->db_level, db->db_blkid);
        idx = hv & h->hash_table_mask;
 
        /*
-        * We musn't hold db_mtx to maintain lock ordering:
+        * We mustn't hold db_mtx to maintain lock ordering:
         * DBUF_HASH_MUTEX > db_mtx.
         */
-       ASSERT(refcount_is_zero(&db->db_holds));
+       ASSERT(zfs_refcount_is_zero(&db->db_holds));
        ASSERT(db->db_state == DB_EVICTING);
        ASSERT(!MUTEX_HELD(&db->db_mtx));
 
@@ -265,12 +488,13 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
        }
        *dbp = db->db_hash_next;
        db->db_hash_next = NULL;
+       if (h->hash_table[idx] &&
+           h->hash_table[idx]->db_hash_next == NULL)
+               DBUF_STAT_BUMPDOWN(hash_chains);
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_dec_64(&dbuf_hash_count);
 }
 
-static arc_evict_func_t dbuf_do_evict;
-
 typedef enum {
        DBVU_EVICTING,
        DBVU_NOT_EVICTING
@@ -292,7 +516,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
        ASSERT(db->db.db_data != NULL);
        ASSERT3U(db->db_state, ==, DB_CACHED);
 
-       holds = refcount_count(&db->db_holds);
+       holds = zfs_refcount_count(&db->db_holds);
        if (verify_type == DBVU_EVICTING) {
                /*
                 * Immediate eviction occurs when holds == dirtycnt.
@@ -332,11 +556,24 @@ dbuf_evict_user(dmu_buf_impl_t *db)
 #endif
 
        /*
-        * Invoke the callback from a taskq to avoid lock order reversals
-        * and limit stack depth.
+        * There are two eviction callbacks - one that we call synchronously
+        * and one that we invoke via a taskq.  The async one is useful for
+        * avoiding lock order reversals and limiting stack depth.
+        *
+        * Note that if we have a sync callback but no async callback,
+        * it's likely that the sync callback will free the structure
+        * containing the dbu.  In that case we need to take care to not
+        * dereference dbu after calling the sync evict func.
         */
-       taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
-           &dbu->dbu_tqent);
+       boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
+
+       if (dbu->dbu_evict_func_sync != NULL)
+               dbu->dbu_evict_func_sync(dbu);
+
+       if (has_async) {
+               taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
+                   dbu, 0, &dbu->dbu_tqent);
+       }
 }
 
 boolean_t
@@ -358,15 +595,196 @@ dbuf_is_metadata(dmu_buf_impl_t *db)
        }
 }
 
-void
-dbuf_evict(dmu_buf_impl_t *db)
+
+/*
+ * This function *must* return indices evenly distributed between all
+ * sublists of the multilist. This is needed due to how the dbuf eviction
+ * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
+ * distributed between all sublists and uses this assumption when
+ * deciding which sublist to evict from and how much to evict from it.
+ */
+unsigned int
+dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
 {
-       ASSERT(MUTEX_HELD(&db->db_mtx));
-       ASSERT(db->db_buf == NULL);
-       ASSERT(db->db_data_pending == NULL);
+       dmu_buf_impl_t *db = obj;
 
-       dbuf_clear(db);
-       dbuf_destroy(db);
+       /*
+        * The assumption here is that the hash value for a given
+        * dmu_buf_impl_t will remain constant throughout its lifetime
+        * (i.e. its objset, object, level and blkid fields don't change).
+        * Thus, we don't need to store the dbuf's sublist index
+        * on insertion, as this index can be recalculated on removal.
+        *
+        * Also, the low order bits of the hash value are thought to be
+        * distributed evenly. Otherwise, in the case that the multilist
+        * has a power of two number of sublists, each sublist's usage
+        * would not be evenly distributed.
+        */
+       return (dbuf_hash(db->db_objset, db->db.db_object,
+           db->db_level, db->db_blkid) %
+           multilist_get_num_sublists(ml));
+}
+
+static inline unsigned long
+dbuf_cache_target_bytes(void)
+{
+       return (MIN(dbuf_cache_max_bytes,
+           arc_target_bytes() >> dbuf_cache_shift));
+}
+
+static inline uint64_t
+dbuf_cache_hiwater_bytes(void)
+{
+       uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
+       return (dbuf_cache_target +
+           (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
+}
+
+static inline uint64_t
+dbuf_cache_lowater_bytes(void)
+{
+       uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
+       return (dbuf_cache_target -
+           (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
+}
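
/*
 * Worked example (illustrative, not part of the patch): with an assumed ARC
 * target of 4 GiB, the default dbuf_cache_shift of 5, and the default 10%
 * margins, the three functions above evaluate to:
 *
 *	dbuf_cache_target_bytes()  = 4 GiB >> 5             = 128 MiB
 *	dbuf_cache_hiwater_bytes() = 128M + (128M * 10)/100 = ~140.8 MiB
 *	dbuf_cache_lowater_bytes() = 128M - (128M * 10)/100 = ~115.2 MiB
 *
 * so the eviction thread aims to shrink the cache to roughly 115 MiB, and
 * callers start evicting directly once it exceeds roughly 141 MiB.
 */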
+
+static inline boolean_t
+dbuf_cache_above_hiwater(void)
+{
+       return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+           dbuf_cache_hiwater_bytes());
+}
+
+static inline boolean_t
+dbuf_cache_above_lowater(void)
+{
+       return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+           dbuf_cache_lowater_bytes());
+}
+
+/*
+ * Evict the oldest eligible dbuf from the dbuf cache.
+ */
+static void
+dbuf_evict_one(void)
+{
+       int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
+       multilist_sublist_t *mls = multilist_sublist_lock(
+           dbuf_caches[DB_DBUF_CACHE].cache, idx);
+
+       ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
+
+       dmu_buf_impl_t *db = multilist_sublist_tail(mls);
+       while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
+               db = multilist_sublist_prev(mls, db);
+       }
+
+       DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
+           multilist_sublist_t *, mls);
+
+       if (db != NULL) {
+               multilist_sublist_remove(mls, db);
+               multilist_sublist_unlock(mls);
+               (void) zfs_refcount_remove_many(
+                   &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
+               DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
+               DBUF_STAT_BUMPDOWN(cache_count);
+               DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
+                   db->db.db_size);
+               ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
+               db->db_caching_status = DB_NO_CACHE;
+               dbuf_destroy(db);
+               DBUF_STAT_MAX(cache_size_bytes_max,
+                   zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size));
+               DBUF_STAT_BUMP(cache_total_evicts);
+       } else {
+               multilist_sublist_unlock(mls);
+       }
+}
+
+/*
+ * The dbuf evict thread is responsible for aging out dbufs from the
+ * cache. Once the cache has reached its maximum size, dbufs are removed
+ * and destroyed. The eviction thread will continue running until the size
+ * of the dbuf cache is at or below the maximum size. Once a dbuf is aged
+ * out of the cache, it is destroyed and becomes eligible for arc eviction.
+ */
+/* ARGSUSED */
+static void
+dbuf_evict_thread(void *unused)
+{
+       callb_cpr_t cpr;
+
+       CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
+
+       mutex_enter(&dbuf_evict_lock);
+       while (!dbuf_evict_thread_exit) {
+               while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
+                       CALLB_CPR_SAFE_BEGIN(&cpr);
+                       (void) cv_timedwait_sig_hires(&dbuf_evict_cv,
+                           &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
+                       CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
+               }
+               mutex_exit(&dbuf_evict_lock);
+
+               /*
+                * Keep evicting as long as we're above the low water mark
+                * for the cache. We do this without holding the locks to
+                * minimize lock contention.
+                */
+               while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
+                       dbuf_evict_one();
+               }
+
+               mutex_enter(&dbuf_evict_lock);
+       }
+
+       dbuf_evict_thread_exit = B_FALSE;
+       cv_broadcast(&dbuf_evict_cv);
+       CALLB_CPR_EXIT(&cpr);   /* drops dbuf_evict_lock */
+       thread_exit();
+}
+
+/*
+ * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
+ * If the dbuf cache is at its high water mark, then evict a dbuf from the
+ * dbuf cache using the caller's context.
+ */
+static void
+dbuf_evict_notify(void)
+{
+       /*
+        * We check if we should evict without holding the dbuf_evict_lock,
+        * because it's OK to occasionally make the wrong decision here,
+        * and grabbing the lock results in massive lock contention.
+        */
+       if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+           dbuf_cache_target_bytes()) {
+               if (dbuf_cache_above_hiwater())
+                       dbuf_evict_one();
+               cv_signal(&dbuf_evict_cv);
+       }
+}
+
+static int
+dbuf_kstat_update(kstat_t *ksp, int rw)
+{
+       dbuf_stats_t *ds = ksp->ks_data;
+
+       if (rw == KSTAT_WRITE) {
+               return (SET_ERROR(EACCES));
+       } else {
+               ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
+                   &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
+               ds->cache_size_bytes.value.ui64 =
+                   zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
+               ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
+               ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
+               ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
+               ds->hash_elements.value.ui64 = dbuf_hash_count;
+       }
+
+       return (0);
 }
 
 void
@@ -387,7 +805,7 @@ dbuf_init(void)
 
 retry:
        h->hash_table_mask = hsize - 1;
-#if defined(_KERNEL) && defined(HAVE_SPL)
+#if defined(_KERNEL)
        /*
         * Large allocations which do not require contiguous pages
         * should be using vmem_alloc() in the linux kernel
@@ -403,7 +821,7 @@ retry:
                goto retry;
        }
 
-       dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
+       dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
            sizeof (dmu_buf_impl_t),
            0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
 
@@ -412,11 +830,62 @@ retry:
 
        dbuf_stats_init(h);
 
+       /*
+        * Setup the parameters for the dbuf caches. We set the sizes of the
+        * dbuf cache and the metadata cache to 1/32nd and 1/16th (default)
+        * of the target size of the ARC. If the values have been specified as
+        * module options and they're not greater than the target size of the
+        * ARC, then we honor those values.
+        */
+       if (dbuf_cache_max_bytes == 0 ||
+           dbuf_cache_max_bytes >= arc_target_bytes()) {
+               dbuf_cache_max_bytes = arc_target_bytes() >> dbuf_cache_shift;
+       }
+       if (dbuf_metadata_cache_max_bytes == 0 ||
+           dbuf_metadata_cache_max_bytes >= arc_target_bytes()) {
+               dbuf_metadata_cache_max_bytes =
+                   arc_target_bytes() >> dbuf_metadata_cache_shift;
+       }
+
        /*
         * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
         * configuration is not required.
         */
        dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
+
+       for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
+               dbuf_caches[dcs].cache =
+                   multilist_create(sizeof (dmu_buf_impl_t),
+                   offsetof(dmu_buf_impl_t, db_cache_link),
+                   dbuf_cache_multilist_index_func);
+               zfs_refcount_create(&dbuf_caches[dcs].size);
+       }
+
+       dbuf_evict_thread_exit = B_FALSE;
+       mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
+       cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
+       dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
+           NULL, 0, &p0, TS_RUN, minclsyspri);
+
+       dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
+           KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
+           KSTAT_FLAG_VIRTUAL);
+       if (dbuf_ksp != NULL) {
+               dbuf_ksp->ks_data = &dbuf_stats;
+               dbuf_ksp->ks_update = dbuf_kstat_update;
+               kstat_install(dbuf_ksp);
+
+               for (i = 0; i < DN_MAX_LEVELS; i++) {
+                       snprintf(dbuf_stats.cache_levels[i].name,
+                           KSTAT_STRLEN, "cache_level_%d", i);
+                       dbuf_stats.cache_levels[i].data_type =
+                           KSTAT_DATA_UINT64;
+                       snprintf(dbuf_stats.cache_levels_bytes[i].name,
+                           KSTAT_STRLEN, "cache_level_%d_bytes", i);
+                       dbuf_stats.cache_levels_bytes[i].data_type =
+                           KSTAT_DATA_UINT64;
+               }
+       }
 }
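
/*
 * Usage note (illustrative, not part of the patch): on Linux the kstat
 * registered above as ("zfs", 0, "dbufstats") is surfaced by the SPL under
 * procfs, so the counters can be inspected at runtime with, e.g.:
 *
 *	cat /proc/spl/kstat/zfs/dbufstats
 */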
 
 void
@@ -429,7 +898,7 @@ dbuf_fini(void)
 
        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_destroy(&h->hash_mutexes[i]);
-#if defined(_KERNEL) && defined(HAVE_SPL)
+#if defined(_KERNEL)
        /*
         * Large allocations which do not require contiguous pages
         * should be using vmem_free() in the linux kernel
@@ -438,8 +907,29 @@ dbuf_fini(void)
 #else
        kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
 #endif
-       kmem_cache_destroy(dbuf_cache);
+       kmem_cache_destroy(dbuf_kmem_cache);
        taskq_destroy(dbu_evict_taskq);
+
+       mutex_enter(&dbuf_evict_lock);
+       dbuf_evict_thread_exit = B_TRUE;
+       while (dbuf_evict_thread_exit) {
+               cv_signal(&dbuf_evict_cv);
+               cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
+       }
+       mutex_exit(&dbuf_evict_lock);
+
+       mutex_destroy(&dbuf_evict_lock);
+       cv_destroy(&dbuf_evict_cv);
+
+       for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
+               zfs_refcount_destroy(&dbuf_caches[dcs].size);
+               multilist_destroy(dbuf_caches[dcs].cache);
+       }
+
+       if (dbuf_ksp != NULL) {
+               kstat_delete(dbuf_ksp);
+               dbuf_ksp = NULL;
+       }
 }
 
 /*
@@ -478,7 +968,6 @@ dbuf_verify(dmu_buf_impl_t *db)
                ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
        } else if (db->db_blkid == DMU_SPILL_BLKID) {
                ASSERT(dn != NULL);
-               ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                ASSERT0(db->db.db_offset);
        } else {
                ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
@@ -519,7 +1008,7 @@ dbuf_verify(dmu_buf_impl_t *db)
                } else {
                        /* db is pointed to by an indirect block */
                        ASSERTV(int epb = db->db_parent->db.db_size >>
-                               SPA_BLKPTRSHIFT);
+                           SPA_BLKPTRSHIFT);
                        ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
                        ASSERT3U(db->db_parent->db.db_object, ==,
                            db->db.db_object);
@@ -543,13 +1032,49 @@ dbuf_verify(dmu_buf_impl_t *db)
                 * If the blkptr isn't set but they have nonzero data,
                 * it had better be dirty, otherwise we'll lose that
                 * data when we evict this buffer.
+                *
+                * There is an exception to this rule for indirect blocks; in
+                * this case, if the indirect block is a hole, we fill in a few
+                * fields on each of the child blocks (importantly, birth time)
+                * to prevent hole birth times from being lost when you
+                * partially fill in a hole.
                 */
                if (db->db_dirtycnt == 0) {
-                       ASSERTV(uint64_t *buf = db->db.db_data);
-                       int i;
+                       if (db->db_level == 0) {
+                               uint64_t *buf = db->db.db_data;
+                               int i;
 
-                       for (i = 0; i < db->db.db_size >> 3; i++) {
-                               ASSERT(buf[i] == 0);
+                               for (i = 0; i < db->db.db_size >> 3; i++) {
+                                       ASSERT(buf[i] == 0);
+                               }
+                       } else {
+                               blkptr_t *bps = db->db.db_data;
+                               ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
+                                   db->db.db_size);
+                               /*
+                                * We want to verify that all the blkptrs in the
+                                * indirect block are holes, but we may have
+                                * automatically set up a few fields for them.
+                                * We iterate through each blkptr and verify
+                                * they only have those fields set.
+                                */
+                               for (int i = 0;
+                                   i < db->db.db_size / sizeof (blkptr_t);
+                                   i++) {
+                                       blkptr_t *bp = &bps[i];
+                                       ASSERT(ZIO_CHECKSUM_IS_ZERO(
+                                           &bp->blk_cksum));
+                                       ASSERT(
+                                           DVA_IS_EMPTY(&bp->blk_dva[0]) &&
+                                           DVA_IS_EMPTY(&bp->blk_dva[1]) &&
+                                           DVA_IS_EMPTY(&bp->blk_dva[2]));
+                                       ASSERT0(bp->blk_fill);
+                                       ASSERT0(bp->blk_pad[0]);
+                                       ASSERT0(bp->blk_pad[1]);
+                                       ASSERT(!BP_IS_EMBEDDED(bp));
+                                       ASSERT(BP_IS_HOLE(bp));
+                                       ASSERT0(bp->blk_phys_birth);
+                               }
                        }
                }
        }
@@ -562,7 +1087,7 @@ dbuf_clear_data(dmu_buf_impl_t *db)
 {
        ASSERT(MUTEX_HELD(&db->db_mtx));
        dbuf_evict_user(db);
-       db->db_buf = NULL;
+       ASSERT3P(db->db_buf, ==, NULL);
        db->db.db_data = NULL;
        if (db->db_state != DB_NOFILL)
                db->db_state = DB_UNCACHED;
@@ -577,8 +1102,6 @@ dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
        db->db_buf = buf;
        ASSERT(buf->b_data != NULL);
        db->db.db_data = buf->b_data;
-       if (!arc_released(buf))
-               arc_set_callback(buf, dbuf_do_evict, db);
 }
 
 /*
@@ -589,17 +1112,19 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
 {
        arc_buf_t *abuf;
 
+       ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        mutex_enter(&db->db_mtx);
-       if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
+       if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
                int blksz = db->db.db_size;
                spa_t *spa = db->db_objset->os_spa;
 
                mutex_exit(&db->db_mtx);
-               abuf = arc_loan_buf(spa, blksz);
+               abuf = arc_loan_buf(spa, B_FALSE, blksz);
                bcopy(db->db.db_data, abuf->b_data, blksz);
        } else {
                abuf = db->db_buf;
                arc_loan_inuse_buf(abuf, db);
+               db->db_buf = NULL;
                dbuf_clear_data(db);
                mutex_exit(&db->db_mtx);
        }
@@ -611,7 +1136,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
  * provided.
  */
 uint64_t
-dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
+dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
 {
        if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
                /*
@@ -627,14 +1152,25 @@ dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
                 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
                 *
                 * Thus, the level n blkid is: offset /
-                * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
+                * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
                 * = offset / 2^(datablkshift + level *
                 *   (indblkshift - SPA_BLKPTRSHIFT))
                 * = offset >> (datablkshift + level *
                 *   (indblkshift - SPA_BLKPTRSHIFT))
                 */
-               return (offset >> (dn->dn_datablkshift + level *
-                   (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
+
+               const unsigned exp = dn->dn_datablkshift +
+                   level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
+
+               if (exp >= 8 * sizeof (offset)) {
+                       /* This only happens on the highest indirection level */
+                       ASSERT3U(level, ==, dn->dn_nlevels - 1);
+                       return (0);
+               }
+
+               ASSERT3U(exp, <, 8 * sizeof (offset));
+
+               return (offset >> exp);
        } else {
                ASSERT3U(offset, <, dn->dn_datablksz);
                return (0);
@@ -642,7 +1178,8 @@ dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
 }
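
/*
 * Worked example (illustrative, not part of the patch): with 128K data
 * blocks (datablkshift = 17) and 128K indirect blocks (indblkshift = 17,
 * i.e. 2^(17 - SPA_BLKPTRSHIFT) = 2^(17 - 7) = 1024 block pointers per
 * indirect block), the level-1 blkid covering byte offset 2^30 is
 * 2^30 >> (17 + 1 * (17 - 7)) = 2^30 >> 27 = 8.
 */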
 
 static void
-dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
+dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
+    arc_buf_t *buf, void *vdb)
 {
        dmu_buf_impl_t *db = vdb;
 
@@ -651,28 +1188,89 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
        /*
         * All reads are synchronous, so we must have a hold on the dbuf
         */
-       ASSERT(refcount_count(&db->db_holds) > 0);
+       ASSERT(zfs_refcount_count(&db->db_holds) > 0);
        ASSERT(db->db_buf == NULL);
        ASSERT(db->db.db_data == NULL);
-       if (db->db_level == 0 && db->db_freed_in_flight) {
-               /* we were freed in flight; disregard any error */
+       if (buf == NULL) {
+               /* i/o error */
+               ASSERT(zio == NULL || zio->io_error != 0);
+               ASSERT(db->db_blkid != DMU_BONUS_BLKID);
+               ASSERT3P(db->db_buf, ==, NULL);
+               db->db_state = DB_UNCACHED;
+       } else if (db->db_level == 0 && db->db_freed_in_flight) {
+               /* freed in flight */
+               ASSERT(zio == NULL || zio->io_error == 0);
                arc_release(buf, db);
                bzero(buf->b_data, db->db.db_size);
                arc_buf_freeze(buf);
                db->db_freed_in_flight = FALSE;
                dbuf_set_data(db, buf);
                db->db_state = DB_CACHED;
-       } else if (zio == NULL || zio->io_error == 0) {
+       } else {
+               /* success */
+               ASSERT(zio == NULL || zio->io_error == 0);
                dbuf_set_data(db, buf);
                db->db_state = DB_CACHED;
-       } else {
-               ASSERT(db->db_blkid != DMU_BONUS_BLKID);
-               ASSERT3P(db->db_buf, ==, NULL);
-               VERIFY(arc_buf_remove_ref(buf, db));
-               db->db_state = DB_UNCACHED;
        }
        cv_broadcast(&db->db_changed);
-       dbuf_rele_and_unlock(db, NULL);
+       dbuf_rele_and_unlock(db, NULL, B_FALSE);
+}
+
+
+/*
+ * This function ensures that, when doing a decrypting read of a block,
+ * we make sure we have decrypted the dnode associated with it. We must do
+ * this so that we ensure we are fully authenticating the checksum-of-MACs
+ * tree from the root of the objset down to this block. Indirect blocks are
+ * always verified against their secure checksum-of-MACs assuming that the
+ * dnode containing them is correct. Now that we are doing a decrypting read,
+ * we can be sure that the key is loaded and verify that assumption. This is
+ * especially important considering that we always read encrypted dnode
+ * blocks as raw data (without verifying their MACs) to start, and
+ * decrypt / authenticate them when we need to read an encrypted bonus buffer.
+ */
+static int
+dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
+{
+       int err = 0;
+       objset_t *os = db->db_objset;
+       arc_buf_t *dnode_abuf;
+       dnode_t *dn;
+       zbookmark_phys_t zb;
+
+       ASSERT(MUTEX_HELD(&db->db_mtx));
+
+       if (!os->os_encrypted || os->os_raw_receive ||
+           (flags & DB_RF_NO_DECRYPT) != 0)
+               return (0);
+
+       DB_DNODE_ENTER(db);
+       dn = DB_DNODE(db);
+       dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
+
+       if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
+               DB_DNODE_EXIT(db);
+               return (0);
+       }
+
+       SET_BOOKMARK(&zb, dmu_objset_id(os),
+           DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
+       err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
+
+       /*
+        * An error code of EACCES tells us that the key is still not
+        * available. This is ok if we are only reading authenticated
+        * (and therefore non-encrypted) blocks.
+        */
+       if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
+           !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
+           (db->db_blkid == DMU_BONUS_BLKID &&
+           !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
+               err = 0;
+
+       DB_DNODE_EXIT(db);
+
+       return (err);
 }
 
 static int
@@ -681,11 +1279,11 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
        dnode_t *dn;
        zbookmark_phys_t zb;
        uint32_t aflags = ARC_FLAG_NOWAIT;
-       int err;
+       int err, zio_flags = 0;
 
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        /* We need the struct_rwlock to prevent db_blkptr from changing. */
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(MUTEX_HELD(&db->db_mtx));
@@ -693,13 +1291,26 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
        ASSERT(db->db_buf == NULL);
 
        if (db->db_blkid == DMU_BONUS_BLKID) {
+               /*
+                * The bonus length stored in the dnode may be less than
+                * the maximum available space in the bonus buffer.
+                */
                int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
+               int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
+
+               /* if the underlying dnode block is encrypted, decrypt it */
+               err = dbuf_read_verify_dnode_crypt(db, flags);
+               if (err != 0) {
+                       DB_DNODE_EXIT(db);
+                       mutex_exit(&db->db_mtx);
+                       return (err);
+               }
 
                ASSERT3U(bonuslen, <=, db->db.db_size);
-               db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
-               arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
-               if (bonuslen < DN_MAX_BONUSLEN)
-                       bzero(db->db.db_data, DN_MAX_BONUSLEN);
+               db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
+               arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
+               if (bonuslen < max_bonuslen)
+                       bzero(db->db.db_data, max_bonuslen);
                if (bonuslen)
                        bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
                DB_DNODE_EXIT(db);
@@ -718,15 +1329,60 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
            BP_IS_HOLE(db->db_blkptr)))) {
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
 
-               DB_DNODE_EXIT(db);
-               dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
-                   db->db.db_size, db, type));
+               dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
+                   db->db.db_size));
                bzero(db->db.db_data, db->db.db_size);
+
+               if (db->db_blkptr != NULL && db->db_level > 0 &&
+                   BP_IS_HOLE(db->db_blkptr) &&
+                   db->db_blkptr->blk_birth != 0) {
+                       blkptr_t *bps = db->db.db_data;
+                       for (int i = 0; i < ((1 <<
+                           DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
+                           i++) {
+                               blkptr_t *bp = &bps[i];
+                               ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
+                                   1 << dn->dn_indblkshift);
+                               BP_SET_LSIZE(bp,
+                                   BP_GET_LEVEL(db->db_blkptr) == 1 ?
+                                   dn->dn_datablksz :
+                                   BP_GET_LSIZE(db->db_blkptr));
+                               BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
+                               BP_SET_LEVEL(bp,
+                                   BP_GET_LEVEL(db->db_blkptr) - 1);
+                               BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
+                       }
+               }
+               DB_DNODE_EXIT(db);
                db->db_state = DB_CACHED;
                mutex_exit(&db->db_mtx);
                return (0);
        }
 
+
+       SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
+           db->db.db_object, db->db_level, db->db_blkid);
+
+       /*
+        * All bps of an encrypted os should have the encryption bit set.
+        * If this is not true it indicates tampering and we report an error.
+        */
+       if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
+               spa_log_error(db->db_objset->os_spa, &zb);
+               zfs_panic_recover("unencrypted block in encrypted "
+                   "object set %llu", dmu_objset_id(db->db_objset));
+               DB_DNODE_EXIT(db);
+               mutex_exit(&db->db_mtx);
+               return (SET_ERROR(EIO));
+       }
+
+       err = dbuf_read_verify_dnode_crypt(db, flags);
+       if (err != 0) {
+               DB_DNODE_EXIT(db);
+               mutex_exit(&db->db_mtx);
+               return (err);
+       }
+
        DB_DNODE_EXIT(db);
 
        db->db_state = DB_READ;
@@ -734,28 +1390,102 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 
        if (DBUF_IS_L2CACHEABLE(db))
                aflags |= ARC_FLAG_L2CACHE;
-       if (DBUF_IS_L2COMPRESSIBLE(db))
-               aflags |= ARC_FLAG_L2COMPRESS;
-
-       SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
-           db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
-           db->db.db_object, db->db_level, db->db_blkid);
 
        dbuf_add_ref(db, NULL);
 
+       zio_flags = (flags & DB_RF_CANFAIL) ?
+           ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
+
+       if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
+               zio_flags |= ZIO_FLAG_RAW;
+
        err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
-           dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
-           (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
+           dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
            &aflags, &zb);
 
-       return (SET_ERROR(err));
+       return (err);
+}
+
+/*
+ * This is our just-in-time copy function.  It makes a copy of buffers that
+ * have been modified in a previous transaction group before we access them in
+ * the current active group.
+ *
+ * This function is used in three places: when we are dirtying a buffer for the
+ * first time in a txg, when we are freeing a range in a dnode that includes
+ * this buffer, and when we are accessing a buffer which was received compressed
+ * and later referenced in a WRITE_BYREF record.
+ *
+ * Note that when we are called from dbuf_free_range() we do not put a hold on
+ * the buffer, we just traverse the active dbuf list for the dnode.
+ */
+static void
+dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
+{
+       dbuf_dirty_record_t *dr = db->db_last_dirty;
+
+       ASSERT(MUTEX_HELD(&db->db_mtx));
+       ASSERT(db->db.db_data != NULL);
+       ASSERT(db->db_level == 0);
+       ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
+
+       if (dr == NULL ||
+           (dr->dt.dl.dr_data !=
+           ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
+               return;
+
+       /*
+        * If the last dirty record for this dbuf has not yet synced
+        * and it's referencing the dbuf data, either:
+        *      reset the reference to point to a new copy,
+        * or (if there are no active holders)
+        *      just null out the current db_data pointer.
+        */
+       ASSERT3U(dr->dr_txg, >=, txg - 2);
+       if (db->db_blkid == DMU_BONUS_BLKID) {
+               dnode_t *dn = DB_DNODE(db);
+               int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
+               dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
+               arc_space_consume(bonuslen, ARC_SPACE_BONUS);
+               bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
+       } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
+               dnode_t *dn = DB_DNODE(db);
+               int size = arc_buf_size(db->db_buf);
+               arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
+               spa_t *spa = db->db_objset->os_spa;
+               enum zio_compress compress_type =
+                   arc_get_compression(db->db_buf);
+
+               if (arc_is_encrypted(db->db_buf)) {
+                       boolean_t byteorder;
+                       uint8_t salt[ZIO_DATA_SALT_LEN];
+                       uint8_t iv[ZIO_DATA_IV_LEN];
+                       uint8_t mac[ZIO_DATA_MAC_LEN];
+
+                       arc_get_raw_params(db->db_buf, &byteorder, salt,
+                           iv, mac);
+                       dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
+                           dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
+                           mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
+                           compress_type);
+               } else if (compress_type != ZIO_COMPRESS_OFF) {
+                       ASSERT3U(type, ==, ARC_BUFC_DATA);
+                       dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
+                           size, arc_buf_lsize(db->db_buf), compress_type);
+               } else {
+                       dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
+               }
+               bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
+       } else {
+               db->db_buf = NULL;
+               dbuf_clear_data(db);
+       }
 }
 
 int
 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 {
        int err = 0;
-       boolean_t havepzio = (zio != NULL);
        boolean_t prefetch;
        dnode_t *dn;
 
@@ -763,7 +1493,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
         * We don't have to hold the mutex to check db_state because it
         * can't be freed while we have a hold on the buffer.
         */
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 
        if (db->db_state == DB_NOFILL)
                return (SET_ERROR(EIO));
@@ -779,31 +1509,73 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 
        mutex_enter(&db->db_mtx);
        if (db->db_state == DB_CACHED) {
+               spa_t *spa = dn->dn_objset->os_spa;
+
+               /*
+                * Ensure that this block's dnode has been decrypted if
+                * the caller has requested decrypted data.
+                */
+               err = dbuf_read_verify_dnode_crypt(db, flags);
+
+               /*
+                * If the arc buf is compressed or encrypted and the caller
+                * requested uncompressed data, we need to untransform it
+                * before returning. We also call arc_untransform() on any
+                * unauthenticated blocks, which will verify their MAC if
+                * the key is now available.
+                */
+               if (err == 0 && db->db_buf != NULL &&
+                   (flags & DB_RF_NO_DECRYPT) == 0 &&
+                   (arc_is_encrypted(db->db_buf) ||
+                   arc_is_unauthenticated(db->db_buf) ||
+                   arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
+                       zbookmark_phys_t zb;
+
+                       SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
+                           db->db.db_object, db->db_level, db->db_blkid);
+                       dbuf_fix_old_data(db, spa_syncing_txg(spa));
+                       err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
+                       dbuf_set_data(db, db->db_buf);
+               }
                mutex_exit(&db->db_mtx);
-               if (prefetch)
-                       dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
+               if (err == 0 && prefetch)
+                       dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);
+               DBUF_STAT_BUMP(hash_hits);
        } else if (db->db_state == DB_UNCACHED) {
                spa_t *spa = dn->dn_objset->os_spa;
+               boolean_t need_wait = B_FALSE;
 
-               if (zio == NULL)
+               if (zio == NULL &&
+                   db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
                        zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
-
+                       need_wait = B_TRUE;
+               }
                err = dbuf_read_impl(db, zio, flags);
 
                /* dbuf_read_impl has dropped db_mtx for us */
 
                if (!err && prefetch)
-                       dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
+                       dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
 
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);
+               DBUF_STAT_BUMP(hash_misses);
 
-               if (!err && !havepzio)
-                       err = zio_wait(zio);
+               /*
+                * If we created a zio_root we must execute it to avoid
+                * leaking it, even if it isn't attached to any work due
+                * to an error in dbuf_read_impl().
+                */
+               if (need_wait) {
+                       if (err == 0)
+                               err = zio_wait(zio);
+                       else
+                               VERIFY0(zio_wait(zio));
+               }
        } else {
                /*
                 * Another reader came in while the dbuf was in flight
@@ -815,10 +1587,11 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
                 */
                mutex_exit(&db->db_mtx);
                if (prefetch)
-                       dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
+                       dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);
+               DBUF_STAT_BUMP(hash_misses);
 
                /* Skip the wait per the caller's request. */
                mutex_enter(&db->db_mtx);
@@ -837,14 +1610,13 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
                mutex_exit(&db->db_mtx);
        }
 
-       ASSERT(err || havepzio || db->db_state == DB_CACHED);
        return (err);
 }
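
As a hedged distillation (not part of the patch), the rule the hunks above enforce is that a locally created root zio must be waited on along every path, even when dbuf_read_impl() fails before attaching any child I/O:

	zio_t *zio_local = NULL;
	boolean_t need_wait = B_FALSE;

	if (zio == NULL && db->db_blkptr != NULL &&
	    !BP_IS_HOLE(db->db_blkptr)) {
		/* there is real I/O to do, so create and own a root zio */
		zio = zio_local = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		need_wait = B_TRUE;
	}
	err = dbuf_read_impl(db, zio, flags);
	if (need_wait) {
		if (err == 0)
			err = zio_wait(zio_local);	/* propagate I/O status */
		else
			VERIFY0(zio_wait(zio_local));	/* reap it; never leak */
	}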
 
 static void
 dbuf_noread(dmu_buf_impl_t *db)
 {
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        mutex_enter(&db->db_mtx);
        while (db->db_state == DB_READ || db->db_state == DB_FILL)
@@ -855,7 +1627,7 @@ dbuf_noread(dmu_buf_impl_t *db)
 
                ASSERT(db->db_buf == NULL);
                ASSERT(db->db.db_data == NULL);
-               dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
+               dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
                db->db_state = DB_FILL;
        } else if (db->db_state == DB_NOFILL) {
                dbuf_clear_data(db);
@@ -865,59 +1637,6 @@ dbuf_noread(dmu_buf_impl_t *db)
        mutex_exit(&db->db_mtx);
 }
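
The wait loop at the top of dbuf_noread() (partially elided by the hunk above) relies on the db_changed condition-variable idiom used throughout dbuf.c; a minimal sketch, assuming that standard pairing:

	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);	/* re-check under db_mtx */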
 
-/*
- * This is our just-in-time copy function.  It makes a copy of
- * buffers, that have been modified in a previous transaction
- * group, before we modify them in the current active group.
- *
- * This function is used in two places: when we are dirtying a
- * buffer for the first time in a txg, and when we are freeing
- * a range in a dnode that includes this buffer.
- *
- * Note that when we are called from dbuf_free_range() we do
- * not put a hold on the buffer, we just traverse the active
- * dbuf list for the dnode.
- */
-static void
-dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
-{
-       dbuf_dirty_record_t *dr = db->db_last_dirty;
-
-       ASSERT(MUTEX_HELD(&db->db_mtx));
-       ASSERT(db->db.db_data != NULL);
-       ASSERT(db->db_level == 0);
-       ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
-
-       if (dr == NULL ||
-           (dr->dt.dl.dr_data !=
-           ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
-               return;
-
-       /*
-        * If the last dirty record for this dbuf has not yet synced
-        * and it's referencing the dbuf data, either:
-        *      reset the reference to point to a new copy,
-        * or (if there are no active holders)
-        *      just null out the current db_data pointer.
-        */
-       ASSERT(dr->dr_txg >= txg - 2);
-       if (db->db_blkid == DMU_BONUS_BLKID) {
-               /* Note that the data bufs here are zio_bufs */
-               dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
-               arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
-               bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
-       } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
-               int size = db->db.db_size;
-               arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
-               spa_t *spa = db->db_objset->os_spa;
-
-               dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
-               bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
-       } else {
-               dbuf_clear_data(db);
-       }
-}
-
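
The deleted definition above is not gone from the new file: dbuf_read() still calls dbuf_fix_old_data() in an earlier hunk, so an updated version is defined elsewhere. As a hedged sketch, the copy-on-write choice it makes for a non-bonus buffer is:

	if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		/* live readers remain: give the dirty record a private copy */
		dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		/* no readers: drop db_data; it can be re-read on demand */
		dbuf_clear_data(db);
	}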
 void
 dbuf_unoverride(dbuf_dirty_record_t *dr)
 {
@@ -926,6 +1645,11 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
        uint64_t txg = dr->dr_txg;
 
        ASSERT(MUTEX_HELD(&db->db_mtx));
+       /*
+        * This assert is valid because dmu_sync() expects to be called by
+        * a zilog's get_data while holding a range lock.  This call only
+        * comes from dbuf_dirty() callers who must also hold a range lock.
+        */
        ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
        ASSERT(db->db_level == 0);
 
@@ -941,6 +1665,7 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
 
        dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
        dr->dt.dl.dr_nopwrite = B_FALSE;
+       dr->dt.dl.dr_has_raw_params = B_FALSE;
 
        /*
         * Release the already-written buffer, so we leave it in
@@ -957,9 +1682,6 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
  * data blocks in the free range, so that any future readers will find
  * empty blocks.
- *
- * This is a no-op if the dataset is in the middle of an incremental
- * receive; see comment below for details.
  */
 void
 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
@@ -969,10 +1691,9 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
        dmu_buf_impl_t *db, *db_next;
        uint64_t txg = tx->tx_txg;
        avl_index_t where;
-       boolean_t freespill =
-           (start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID);
 
-       if (end_blkid > dn->dn_maxblkid && !freespill)
+       if (end_blkid > dn->dn_maxblkid &&
+           !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
                end_blkid = dn->dn_maxblkid;
        dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
 
@@ -982,28 +1703,9 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
        db_search->db_state = DB_SEARCH;
 
        mutex_enter(&dn->dn_dbufs_mtx);
-       if (start_blkid >= dn->dn_unlisted_l0_blkid && !freespill) {
-               /* There can't be any dbufs in this range; no need to search. */
-#ifdef DEBUG
-               db = avl_find(&dn->dn_dbufs, db_search, &where);
-               ASSERT3P(db, ==, NULL);
-               db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
-               ASSERT(db == NULL || db->db_level > 0);
-#endif
-               goto out;
-       } else if (dmu_objset_is_receiving(dn->dn_objset)) {
-               /*
-                * If we are receiving, we expect there to be no dbufs in
-                * the range to be freed, because receive modifies each
-                * block at most once, and in offset order.  If this is
-                * not the case, it can lead to performance problems,
-                * so note that we unexpectedly took the slow path.
-                */
-               atomic_inc_64(&zfs_free_range_recv_miss);
-       }
-
        db = avl_find(&dn->dn_dbufs, db_search, &where);
        ASSERT3P(db, ==, NULL);
+
        db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
 
        for (; db != NULL; db = db_next) {
@@ -1035,9 +1737,9 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
                        mutex_exit(&db->db_mtx);
                        continue;
                }
-               if (refcount_count(&db->db_holds) == 0) {
+               if (zfs_refcount_count(&db->db_holds) == 0) {
                        ASSERT(db->db_buf);
-                       dbuf_clear(db);
+                       dbuf_destroy(db);
                        continue;
                }
                /* The dbuf is referenced */
@@ -1076,46 +1778,10 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
                mutex_exit(&db->db_mtx);
        }
 
-out:
        kmem_free(db_search, sizeof (dmu_buf_impl_t));
        mutex_exit(&dn->dn_dbufs_mtx);
 }
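
A hedged sketch of the sentinel-based AVL range walk used above (the real loop saves its successor in db_next before the body may destroy db):

	avl_index_t where;
	dmu_buf_impl_t *db;

	/* no real dbuf can compare equal to the DB_SEARCH sentinel */
	VERIFY3P(avl_find(&dn->dn_dbufs, db_search, &where), ==, NULL);
	for (db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
	    db != NULL && db->db_level == 0 && db->db_blkid <= end_blkid;
	    db = AVL_NEXT(&dn->dn_dbufs, db)) {
		/* visit each cached level-0 dbuf in [start_blkid, end_blkid] */
	}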
 
-static int
-dbuf_block_freeable(dmu_buf_impl_t *db)
-{
-       dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
-       uint64_t birth_txg = 0;
-
-       /*
-        * We don't need any locking to protect db_blkptr:
-        * If it's syncing, then db_last_dirty will be set
-        * so we'll ignore db_blkptr.
-        *
-        * This logic ensures that only block births for
-        * filled blocks are considered.
-        */
-       ASSERT(MUTEX_HELD(&db->db_mtx));
-       if (db->db_last_dirty && (db->db_blkptr == NULL ||
-           !BP_IS_HOLE(db->db_blkptr))) {
-               birth_txg = db->db_last_dirty->dr_txg;
-       } else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
-               birth_txg = db->db_blkptr->blk_birth;
-       }
-
-       /*
-        * If this block doesn't exist or is in a snapshot, it can't be freed.
-        * Don't pass the bp to dsl_dataset_block_freeable() since we
-        * are holding the db_mtx lock and might deadlock if we are
-        * prefetching a dedup-ed block.
-        */
-       if (birth_txg != 0)
-               return (ds == NULL ||
-                   dsl_dataset_block_freeable(ds, NULL, birth_txg));
-       else
-               return (B_FALSE);
-}
-
 void
 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
 {
@@ -1145,7 +1811,7 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
        dmu_buf_will_dirty(&db->db, tx);
 
        /* create the data buffer for the new block */
-       buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
+       buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
 
        /* copy old block data to the new block */
        obuf = db->db_buf;
@@ -1156,7 +1822,7 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
 
        mutex_enter(&db->db_mtx);
        dbuf_set_data(db, buf);
-       VERIFY(arc_buf_remove_ref(obuf, db));
+       arc_buf_destroy(obuf, db);
        db->db.db_size = size;
 
        if (db->db_level == 0) {
@@ -1165,7 +1831,7 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
        }
        mutex_exit(&db->db_mtx);
 
-       dnode_willuse_space(dn, size-osize, tx);
+       dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
        DB_DNODE_EXIT(db);
 }
 
@@ -1215,11 +1881,10 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        objset_t *os;
        dbuf_dirty_record_t **drp, *dr;
        int drop_struct_lock = FALSE;
-       boolean_t do_free_accounting = B_FALSE;
        int txgoff = tx->tx_txg & TXG_MASK;
 
        ASSERT(tx->tx_txg != 0);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        DMU_TX_DIRTY_BUF(tx, db);
 
        DB_DNODE_ENTER(db);
@@ -1229,10 +1894,18 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
         * objects may be dirtied in syncing context, but only if they
         * were already pre-dirtied in open context.
         */
+#ifdef DEBUG
+       if (dn->dn_objset->os_dsl_dataset != NULL) {
+               rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
+                   RW_READER, FTAG);
+       }
        ASSERT(!dmu_tx_is_syncing(tx) ||
            BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
            DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
            dn->dn_objset->os_dsl_dataset == NULL);
+       if (dn->dn_objset->os_dsl_dataset != NULL)
+               rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
+#endif
        /*
         * We make this assert for private objects as well, but after we
         * check if we're already dirty.  They are allowed to re-dirty
@@ -1257,13 +1930,25 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
         * Don't set dirtyctx to SYNC if we're just modifying this as we
         * initialize the objset.
         */
-       if (dn->dn_dirtyctx == DN_UNDIRTIED &&
-           !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
-               dn->dn_dirtyctx =
-                   (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
-               ASSERT(dn->dn_dirtyctx_firstset == NULL);
-               dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
+       if (dn->dn_dirtyctx == DN_UNDIRTIED) {
+               if (dn->dn_objset->os_dsl_dataset != NULL) {
+                       rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
+                           RW_READER, FTAG);
+               }
+               if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
+                       dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
+                           DN_DIRTY_SYNC : DN_DIRTY_OPEN);
+                       ASSERT(dn->dn_dirtyctx_firstset == NULL);
+                       dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
+               }
+               if (dn->dn_objset->os_dsl_dataset != NULL) {
+                       rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
+                           FTAG);
+               }
        }
+
+       if (tx->tx_txg > dn->dn_dirty_txg)
+               dn->dn_dirty_txg = tx->tx_txg;
        mutex_exit(&dn->dn_mtx);
 
        if (db->db_blkid == DMU_SPILL_BLKID)
@@ -1293,11 +1978,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
            (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
 
        ASSERT3U(dn->dn_nlevels, >, db->db_level);
-       ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
-           dn->dn_phys->dn_nlevels > db->db_level ||
-           dn->dn_next_nlevels[txgoff] > db->db_level ||
-           dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
-           dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
 
        /*
         * We should only be dirtying in syncing context if it's the
@@ -1307,22 +1987,21 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
         * this assertion only if we're not already dirty.
         */
        os = dn->dn_objset;
+       VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
+#ifdef DEBUG
+       if (dn->dn_objset->os_dsl_dataset != NULL)
+               rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
        ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
            os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
+       if (dn->dn_objset->os_dsl_dataset != NULL)
+               rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
+#endif
        ASSERT(db->db.db_size != 0);
 
        dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
 
        if (db->db_blkid != DMU_BONUS_BLKID) {
-               /*
-                * Update the accounting.
-                * Note: we delay "free accounting" until after we drop
-                * the db_mtx.  This keeps us from grabbing other locks
-                * (and possibly deadlocking) in bp_get_dsize() while
-                * also holding the db_mtx.
-                */
-               dnode_willuse_space(dn, db->db.db_size, tx);
-               do_free_accounting = dbuf_block_freeable(db);
+               dmu_objset_willuse_space(os, db->db.db_size, tx);
        }
 
        /*
@@ -1403,29 +2082,42 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
                dnode_setdirty(dn, tx);
                DB_DNODE_EXIT(db);
                return (dr);
-       } else if (do_free_accounting) {
-               blkptr_t *bp = db->db_blkptr;
-               int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
-                   bp_get_dsize(os->os_spa, bp) : db->db.db_size;
-               /*
-                * This is only a guess -- if the dbuf is dirty
-                * in a previous txg, we don't know how much
-                * space it will use on disk yet.  We should
-                * really have the struct_rwlock to access
-                * db_blkptr, but since this is just a guess,
-                * it's OK if we get an odd answer.
-                */
-               ddt_prefetch(os->os_spa, bp);
-               dnode_willuse_space(dn, -willfree, tx);
        }
 
+       /*
+        * The dn_struct_rwlock prevents db_blkptr from changing
+        * due to a write from syncing context completing
+        * while we are running, so we want to acquire it before
+        * looking at db_blkptr.
+        */
        if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                drop_struct_lock = TRUE;
        }
 
+       /*
+        * We need to hold the dn_struct_rwlock to make this assertion,
+        * because it protects dn_phys / dn_next_nlevels from changing.
+        */
+       ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
+           dn->dn_phys->dn_nlevels > db->db_level ||
+           dn->dn_next_nlevels[txgoff] > db->db_level ||
+           dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
+           dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
+
+       /*
+        * If we are overwriting a dedup BP, then unless it is snapshotted,
+        * when we get to syncing context we will need to decrement its
+        * refcount in the DDT.  Prefetch the relevant DDT block so that
+        * syncing context won't have to wait for the i/o.
+        */
+       ddt_prefetch(os->os_spa, db->db_blkptr);
+
        if (db->db_level == 0) {
-               dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
+               ASSERT(!db->db_objset->os_raw_receive ||
+                   dn->dn_maxblkid >= db->db_blkid);
+               dnode_new_blkid(dn, db->db_blkid, tx,
+                   drop_struct_lock, B_FALSE);
                ASSERT(dn->dn_maxblkid >= db->db_blkid);
        }
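
The txgoff computed at the top of dbuf_dirty() indexes the dnode's per-txg arrays (dn_next_nlevels[] above); a worked example of the ring arithmetic, assuming the standard TXG_SIZE of 4:

	uint64_t txg = 1027;
	int txgoff = txg & TXG_MASK;	/* TXG_MASK == TXG_SIZE - 1 == 3 */
	/*
	 * txgoff == 3: txgs 1024..1027 map to slots 0..3 and then wrap.
	 * At most TXG_SIZE txgs are ever in flight, so a slot is not
	 * reused until its previous txg has synced.
	 */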
 
@@ -1555,7 +2247,7 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
                ASSERT(db->db_buf != NULL);
                ASSERT(dr->dt.dl.dr_data != NULL);
                if (dr->dt.dl.dr_data != db->db_buf)
-                       VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
+                       arc_buf_destroy(dr->dt.dl.dr_data, db);
        }
 
        kmem_free(dr, sizeof (dbuf_dirty_record_t));
@@ -1563,28 +2255,22 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        ASSERT(db->db_dirtycnt > 0);
        db->db_dirtycnt -= 1;
 
-       if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
-               arc_buf_t *buf = db->db_buf;
-
-               ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
-               dbuf_clear_data(db);
-               VERIFY(arc_buf_remove_ref(buf, db));
-               dbuf_evict(db);
+       if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
+               ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
+               dbuf_destroy(db);
                return (B_TRUE);
        }
 
        return (B_FALSE);
 }
 
-void
-dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
+static void
+dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
 {
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
-       int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
-       dbuf_dirty_record_t *dr;
 
        ASSERT(tx->tx_txg != 0);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 
        /*
         * Quick check for dirtiness.  For already dirty blocks, this
@@ -1594,6 +2280,7 @@ dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
         */
        mutex_enter(&db->db_mtx);
 
+       dbuf_dirty_record_t *dr;
        for (dr = db->db_last_dirty;
            dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
                /*
@@ -1612,12 +2299,36 @@ dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
 
        DB_DNODE_ENTER(db);
        if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
-               rf |= DB_RF_HAVESTRUCT;
+               flags |= DB_RF_HAVESTRUCT;
        DB_DNODE_EXIT(db);
-       (void) dbuf_read(db, NULL, rf);
+       (void) dbuf_read(db, NULL, flags);
        (void) dbuf_dirty(db, tx);
 }
 
+void
+dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
+{
+       dmu_buf_will_dirty_impl(db_fake,
+           DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
+}
+
+boolean_t
+dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
+{
+       dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+
+       mutex_enter(&db->db_mtx);
+       for (dbuf_dirty_record_t *dr = db->db_last_dirty;
+           dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
+               if (dr->dr_txg == tx->tx_txg) {
+                       mutex_exit(&db->db_mtx);
+                       return (B_TRUE);
+               }
+       }
+       mutex_exit(&db->db_mtx);
+       return (B_FALSE);
+}
+
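
The new dmu_buf_is_dirty() answers the question race-free under db_mtx; a hypothetical call site (the assertion is illustrative, not from this patch):

	/* e.g. a caller that requires the buffer to be dirty in this txg */
	ASSERT(dmu_buf_is_dirty(db_fake, tx));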
 void
 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
 {
@@ -1636,7 +2347,7 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT(tx->tx_txg != 0);
        ASSERT(db->db_level == 0);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 
        ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
            dmu_tx_private_ok(tx));
@@ -1645,6 +2356,45 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
        (void) dbuf_dirty(db, tx);
 }
 
+/*
+ * This function is effectively the same as dmu_buf_will_dirty(), but
+ * indicates the caller expects raw encrypted data in the db, and provides
+ * the crypt params (byteorder, salt, iv, mac) which should be stored in the
+ * blkptr_t when this dbuf is written.  This is only used for blocks of
+ * dnodes, during raw receive.
+ */
+void
+dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
+    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
+{
+       dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+       dbuf_dirty_record_t *dr;
+
+       /*
+        * dr_has_raw_params is only processed for blocks of dnodes
+        * (see dbuf_sync_dnode_leaf_crypt()).
+        */
+       ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
+       ASSERT3U(db->db_level, ==, 0);
+       ASSERT(db->db_objset->os_raw_receive);
+
+       dmu_buf_will_dirty_impl(db_fake,
+           DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
+
+       dr = db->db_last_dirty;
+       while (dr != NULL && dr->dr_txg > tx->tx_txg)
+               dr = dr->dr_next;
+
+       ASSERT3P(dr, !=, NULL);
+       ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
+
+       dr->dt.dl.dr_has_raw_params = B_TRUE;
+       dr->dt.dl.dr_byteorder = byteorder;
+       bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
+       bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
+       bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
+}
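
A hypothetical raw-receive call site for the new function, given a held dmu_buf_t *db; the local buffers and their decoding are illustrative, only the call itself comes from this patch:

	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t byteorder;

	/* ... byteorder/salt/iv/mac decoded from the received record ... */
	dmu_buf_set_crypt_params(db, byteorder, salt, iv, mac, tx);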
+
 #pragma weak dmu_buf_fill_done = dbuf_fill_done
 /* ARGSUSED */
 void
@@ -1711,12 +2461,12 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
 void
 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 {
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT(db->db_level == 0);
-       ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
+       ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
        ASSERT(buf != NULL);
-       ASSERT(arc_buf_size(buf) == db->db.db_size);
+       ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
        ASSERT(tx->tx_txg != 0);
 
        arc_return_buf(buf, db);
@@ -1730,11 +2480,18 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
        ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
 
        if (db->db_state == DB_CACHED &&
-           refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
+           zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
+               /*
+                * In practice, we will never have a case where we have an
+                * encrypted arc buffer while additional holds exist on the
+                * dbuf. We don't handle this here so we simply assert that
+                * fact instead.
+                */
+               ASSERT(!arc_is_encrypted(buf));
                mutex_exit(&db->db_mtx);
                (void) dbuf_dirty(db, tx);
                bcopy(buf->b_data, db->db.db_data, db->db.db_size);
-               VERIFY(arc_buf_remove_ref(buf, db));
+               arc_buf_destroy(buf, db);
                xuio_stat_wbuf_copied();
                return;
        }
@@ -1746,16 +2503,17 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
                ASSERT(db->db_buf != NULL);
                if (dr != NULL && dr->dr_txg == tx->tx_txg) {
                        ASSERT(dr->dt.dl.dr_data == db->db_buf);
+
                        if (!arc_released(db->db_buf)) {
                                ASSERT(dr->dt.dl.dr_override_state ==
                                    DR_OVERRIDDEN);
                                arc_release(db->db_buf, db);
                        }
                        dr->dt.dl.dr_data = buf;
-                       VERIFY(arc_buf_remove_ref(db->db_buf, db));
+                       arc_buf_destroy(db->db_buf, db);
                } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
                        arc_release(db->db_buf, db);
-                       VERIFY(arc_buf_remove_ref(db->db_buf, db));
+                       arc_buf_destroy(db->db_buf, db);
                }
                db->db_buf = NULL;
        }
@@ -1767,43 +2525,51 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
        dmu_buf_fill_done(&db->db, tx);
 }
 
-/*
- * "Clear" the contents of this dbuf.  This will mark the dbuf
- * EVICTING and clear *most* of its references.  Unfortunately,
- * when we are not holding the dn_dbufs_mtx, we can't clear the
- * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
- * in this case.  For callers from the DMU we will usually see:
- *     dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
- * For the arc callback, we will usually see:
- *     dbuf_do_evict()->dbuf_clear();dbuf_destroy()
- * Sometimes, though, we will get a mix of these two:
- *     DMU: dbuf_clear()->arc_clear_callback()
- *     ARC: dbuf_do_evict()->dbuf_destroy()
- *
- * This routine will dissociate the dbuf from the arc, by calling
- * arc_clear_callback(), but will not evict the data from the ARC.
- */
 void
-dbuf_clear(dmu_buf_impl_t *db)
+dbuf_destroy(dmu_buf_impl_t *db)
 {
        dnode_t *dn;
        dmu_buf_impl_t *parent = db->db_parent;
        dmu_buf_impl_t *dndb;
-       boolean_t dbuf_gone = B_FALSE;
 
        ASSERT(MUTEX_HELD(&db->db_mtx));
-       ASSERT(refcount_is_zero(&db->db_holds));
+       ASSERT(zfs_refcount_is_zero(&db->db_holds));
 
-       dbuf_evict_user(db);
+       if (db->db_buf != NULL) {
+               arc_buf_destroy(db->db_buf, db);
+               db->db_buf = NULL;
+       }
 
-       if (db->db_state == DB_CACHED) {
-               ASSERT(db->db.db_data != NULL);
-               if (db->db_blkid == DMU_BONUS_BLKID) {
-                       zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
-                       arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
+       if (db->db_blkid == DMU_BONUS_BLKID) {
+               int slots = DB_DNODE(db)->dn_num_slots;
+               int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
+               if (db->db.db_data != NULL) {
+                       kmem_free(db->db.db_data, bonuslen);
+                       arc_space_return(bonuslen, ARC_SPACE_BONUS);
+                       db->db_state = DB_UNCACHED;
                }
-               db->db.db_data = NULL;
-               db->db_state = DB_UNCACHED;
+       }
+
+       dbuf_clear_data(db);
+
+       if (multilist_link_active(&db->db_cache_link)) {
+               ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
+                   db->db_caching_status == DB_DBUF_METADATA_CACHE);
+
+               multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
+               (void) zfs_refcount_remove_many(
+                   &dbuf_caches[db->db_caching_status].size,
+                   db->db.db_size, db);
+
+               if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
+                       DBUF_STAT_BUMPDOWN(metadata_cache_count);
+               } else {
+                       DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
+                       DBUF_STAT_BUMPDOWN(cache_count);
+                       DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
+                           db->db.db_size);
+               }
+               db->db_caching_status = DB_NO_CACHE;
        }
 
        ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
@@ -1812,14 +2578,26 @@ dbuf_clear(dmu_buf_impl_t *db)
        db->db_state = DB_EVICTING;
        db->db_blkptr = NULL;
 
+       /*
+        * Now that db_state is DB_EVICTING, nobody else can find this via
+        * the hash table.  We can now drop db_mtx, which allows us to
+        * acquire the dn_dbufs_mtx.
+        */
+       mutex_exit(&db->db_mtx);
+
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        dndb = dn->dn_dbuf;
-       if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
+       if (db->db_blkid != DMU_BONUS_BLKID) {
+               boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
+               if (needlock)
+                       mutex_enter(&dn->dn_dbufs_mtx);
                avl_remove(&dn->dn_dbufs, db);
                atomic_dec_32(&dn->dn_dbufs_count);
                membar_producer();
                DB_DNODE_EXIT(db);
+               if (needlock)
+                       mutex_exit(&dn->dn_dbufs_mtx);
                /*
                 * Decrementing the dbuf count means that the hold corresponding
                 * to the removed dbuf is no longer discounted in dnode_move(),
@@ -1828,39 +2606,51 @@ dbuf_clear(dmu_buf_impl_t *db)
                 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
                 * release any lock.
                 */
-               dnode_rele(dn, db);
+               mutex_enter(&dn->dn_mtx);
+               dnode_rele_and_unlock(dn, db, B_TRUE);
                db->db_dnode_handle = NULL;
+
+               dbuf_hash_remove(db);
        } else {
                DB_DNODE_EXIT(db);
        }
 
-       if (db->db_buf)
-               dbuf_gone = arc_clear_callback(db->db_buf);
+       ASSERT(zfs_refcount_is_zero(&db->db_holds));
 
-       if (!dbuf_gone)
-               mutex_exit(&db->db_mtx);
+       db->db_parent = NULL;
+
+       ASSERT(db->db_buf == NULL);
+       ASSERT(db->db.db_data == NULL);
+       ASSERT(db->db_hash_next == NULL);
+       ASSERT(db->db_blkptr == NULL);
+       ASSERT(db->db_data_pending == NULL);
+       ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
+       ASSERT(!multilist_link_active(&db->db_cache_link));
+
+       kmem_cache_free(dbuf_kmem_cache, db);
+       arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
 
        /*
         * If this dbuf is referenced from an indirect dbuf,
         * decrement the ref count on the indirect dbuf.
         */
-       if (parent && parent != dndb)
-               dbuf_rele(parent, db);
+       if (parent && parent != dndb) {
+               mutex_enter(&parent->db_mtx);
+               dbuf_rele_and_unlock(parent, db, B_TRUE);
+       }
 }
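
Both dbuf_destroy() above and the hold path later in this patch maintain the same dbuf-cache invariant; as a hedged distillation, removal from a cache always pairs the multilist unlink with the byte-count decrement:

	dbuf_cached_state_t status = db->db_caching_status;

	multilist_remove(dbuf_caches[status].cache, db);
	(void) zfs_refcount_remove_many(&dbuf_caches[status].size,
	    db->db.db_size, db);
	db->db_caching_status = DB_NO_CACHE;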
 
 /*
  * Note: While bpp will always be updated if the function returns success,
  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
- * this happens when the dnode is the meta-dnode, or a userused or groupused
+ * this happens when the dnode is the meta-dnode, or {user|group|project}used
  * object.
  */
 __attribute__((always_inline))
 static inline int
 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
-    dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
+    dmu_buf_impl_t **parentp, blkptr_t **bpp)
 {
-       int nlevels, epbs;
-
        *parentp = NULL;
        *bpp = NULL;
 
@@ -1870,7 +2660,7 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
                mutex_enter(&dn->dn_mtx);
                if (dn->dn_have_spill &&
                    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
-                       *bpp = &dn->dn_phys->dn_spill;
+                       *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
                else
                        *bpp = NULL;
                dbuf_add_ref(dn->dn_dbuf, NULL);
@@ -1879,31 +2669,44 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
                return (0);
        }
 
-       if (dn->dn_phys->dn_nlevels == 0)
-               nlevels = 1;
-       else
-               nlevels = dn->dn_phys->dn_nlevels;
-
-       epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
+       int nlevels =
+           (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
+       int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
 
        ASSERT3U(level * epbs, <, 64);
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
+       /*
+        * This assertion shouldn't trip as long as the max indirect block size
+        * is less than 1M.  The reason for this is that up to that point,
+        * the number of levels required to address an entire object with blocks
+        * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.  In
+        * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
+        * (i.e. we can address the entire object), objects will all use at most
+        * N-1 levels and the assertion won't overflow.  However, once epbs is
+        * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
+        * enough to address an entire object, so objects will have 5 levels,
+        * but then this assertion will overflow.
+        *
+        * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
+        * need to redo this logic to handle overflows.
+        */
+       ASSERT(level >= nlevels ||
+           ((nlevels - level - 1) * epbs) +
+           highbit64(dn->dn_phys->dn_nblkptr) <= 64);
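/*
 * Hedged numeric sketch of the comment above (not part of the patch),
 * assuming 128-byte block pointers, i.e. SPA_BLKPTRSHIFT == 7:
 *
 *   128K indirects (current max): epbs = 17 - 7 = 10; 6 * 10 + 1 = 61 <= 64
 *   1M indirects (hypothetical):  epbs = 20 - 7 = 13; 4 * 13 + 1 = 53 fits,
 *                                 but 5 * 13 + 1 = 66, which overflows.
 */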
        if (level >= nlevels ||
-           (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
+           blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
+           ((nlevels - level - 1) * epbs)) ||
+           (fail_sparse &&
+           blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
                /* the buffer has no parent yet */
                return (SET_ERROR(ENOENT));
        } else if (level < nlevels-1) {
                /* this block is referenced from an indirect block */
-               int err;
-               if (dh == NULL) {
-                       err = dbuf_hold_impl(dn, level+1,
-                           blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
-               } else {
-                       __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
-                           blkid >> epbs, fail_sparse, FALSE, NULL,
-                           parentp, dh->dh_depth + 1);
-                       err = __dbuf_hold_impl(dh + 1);
-               }
+               int err;
+               dbuf_hold_arg_t *dh = dbuf_hold_arg_create(dn, level + 1,
+                   blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
+               err = dbuf_hold_impl_arg(dh);
+               dbuf_hold_arg_destroy(dh);
                if (err)
                        return (err);
                err = dbuf_read(*parentp, NULL,
@@ -1915,6 +2718,8 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
                }
                *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
                    (blkid & ((1ULL << epbs) - 1));
+               if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
+                       ASSERT(BP_IS_HOLE(*bpp));
                return (0);
        } else {
                /* the block is referenced from the dnode */
@@ -1940,7 +2745,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(dn->dn_type != DMU_OT_NONE);
 
-       db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
+       db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
 
        db->db_objset = os;
        db->db.db_object = dn->dn_object;
@@ -1959,13 +2764,14 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
 
        if (blkid == DMU_BONUS_BLKID) {
                ASSERT3P(parent, ==, dn->dn_dbuf);
-               db->db.db_size = DN_MAX_BONUSLEN -
+               db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
                    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                db->db.db_offset = DMU_BONUS_BLKID;
                db->db_state = DB_UNCACHED;
+               db->db_caching_status = DB_NO_CACHE;
                /* the bonus dbuf is not placed in the hash table */
-               arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
+               arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
                return (db);
        } else if (blkid == DMU_SPILL_BLKID) {
                db->db.db_size = (blkptr != NULL) ?
@@ -1989,24 +2795,24 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
        db->db_state = DB_EVICTING;
        if ((odb = dbuf_hash_insert(db)) != NULL) {
                /* someone else inserted it first */
-               kmem_cache_free(dbuf_cache, db);
+               kmem_cache_free(dbuf_kmem_cache, db);
                mutex_exit(&dn->dn_dbufs_mtx);
+               DBUF_STAT_BUMP(hash_insert_race);
                return (odb);
        }
        avl_add(&dn->dn_dbufs, db);
-       if (db->db_level == 0 && db->db_blkid >=
-           dn->dn_unlisted_l0_blkid)
-               dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
+
        db->db_state = DB_UNCACHED;
+       db->db_caching_status = DB_NO_CACHE;
        mutex_exit(&dn->dn_dbufs_mtx);
-       arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
+       arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
 
        if (parent && parent != dn->dn_dbuf)
                dbuf_add_ref(parent, db);
 
        ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
-           refcount_count(&dn->dn_holds) > 0);
-       (void) refcount_add(&dn->dn_holds, db);
+           zfs_refcount_count(&dn->dn_holds) > 0);
+       (void) zfs_refcount_add(&dn->dn_holds, db);
        atomic_inc_32(&dn->dn_dbufs_count);
 
        dprintf_dbuf(db, "db=%p\n", db);
@@ -2014,76 +2820,12 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
        return (db);
 }
 
-static int
-dbuf_do_evict(void *private)
-{
-       dmu_buf_impl_t *db = private;
-
-       if (!MUTEX_HELD(&db->db_mtx))
-               mutex_enter(&db->db_mtx);
-
-       ASSERT(refcount_is_zero(&db->db_holds));
-
-       if (db->db_state != DB_EVICTING) {
-               ASSERT(db->db_state == DB_CACHED);
-               DBUF_VERIFY(db);
-               db->db_buf = NULL;
-               dbuf_evict(db);
-       } else {
-               mutex_exit(&db->db_mtx);
-               dbuf_destroy(db);
-       }
-       return (0);
-}
-
-static void
-dbuf_destroy(dmu_buf_impl_t *db)
-{
-       ASSERT(refcount_is_zero(&db->db_holds));
-
-       if (db->db_blkid != DMU_BONUS_BLKID) {
-               /*
-                * If this dbuf is still on the dn_dbufs list,
-                * remove it from that list.
-                */
-               if (db->db_dnode_handle != NULL) {
-                       dnode_t *dn;
-
-                       DB_DNODE_ENTER(db);
-                       dn = DB_DNODE(db);
-                       mutex_enter(&dn->dn_dbufs_mtx);
-                       avl_remove(&dn->dn_dbufs, db);
-                       atomic_dec_32(&dn->dn_dbufs_count);
-                       mutex_exit(&dn->dn_dbufs_mtx);
-                       DB_DNODE_EXIT(db);
-                       /*
-                        * Decrementing the dbuf count means that the hold
-                        * corresponding to the removed dbuf is no longer
-                        * discounted in dnode_move(), so the dnode cannot be
-                        * moved until after we release the hold.
-                        */
-                       dnode_rele(dn, db);
-                       db->db_dnode_handle = NULL;
-               }
-               dbuf_hash_remove(db);
-       }
-       db->db_parent = NULL;
-       db->db_buf = NULL;
-
-       ASSERT(db->db.db_data == NULL);
-       ASSERT(db->db_hash_next == NULL);
-       ASSERT(db->db_blkptr == NULL);
-       ASSERT(db->db_data_pending == NULL);
-
-       kmem_cache_free(dbuf_cache, db);
-       arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
-}
-
 typedef struct dbuf_prefetch_arg {
        spa_t *dpa_spa; /* The spa to issue the prefetch in. */
        zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
        int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
        int dpa_curlevel; /* The current level that we're reading */
+       dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
        zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
        zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
        arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
@@ -2095,18 +2837,23 @@ typedef struct dbuf_prefetch_arg {
 static void
 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
 {
-       arc_flags_t aflags;
        if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
                return;
 
-       aflags = dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
+       int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
+       arc_flags_t aflags =
+           dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
+
+       /* dnodes are always read as raw and then converted later */
+       if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
+           dpa->dpa_curlevel == 0)
+               zio_flags |= ZIO_FLAG_RAW;
 
        ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
        ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
        ASSERT(dpa->dpa_zio != NULL);
        (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
-           dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
-           &aflags, &dpa->dpa_zb);
+           dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
 }
 
 /*
@@ -2115,27 +2862,66 @@ dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
  * prefetch if the next block down is our target.
  */
 static void
-dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
+dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
+    const blkptr_t *iobp, arc_buf_t *abuf, void *private)
 {
        dbuf_prefetch_arg_t *dpa = private;
-       uint64_t nextblkid;
-       blkptr_t *bp;
 
        ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
        ASSERT3S(dpa->dpa_curlevel, >, 0);
+
+       if (abuf == NULL) {
+               ASSERT(zio == NULL || zio->io_error != 0);
+               kmem_free(dpa, sizeof (*dpa));
+               return;
+       }
+       ASSERT(zio == NULL || zio->io_error == 0);
+
+       /*
+        * The dpa_dnode is only valid if we are called with a NULL
+        * zio. This indicates that the arc_read() returned without
+        * first calling zio_read() to issue a physical read. Once
+        * a physical read is made the dpa_dnode must be invalidated
+        * as the locks guarding it may have been dropped. If the
+        * dpa_dnode is still valid, then we want to add it to the dbuf
+        * cache. To do so, we must hold the dbuf associated with the block
+        * we just prefetched, read its contents so that we associate it
+        * with an arc_buf_t, and then release it.
+        */
        if (zio != NULL) {
                ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
-               ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
+               if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
+                       ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
+               } else {
+                       ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
+               }
                ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
+
+               dpa->dpa_dnode = NULL;
+       } else if (dpa->dpa_dnode != NULL) {
+               uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
+                   (dpa->dpa_epbs * (dpa->dpa_curlevel -
+                   dpa->dpa_zb.zb_level));
+               dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
+                   dpa->dpa_curlevel, curblkid, FTAG);
+               if (db == NULL) {
+                       kmem_free(dpa, sizeof (*dpa));
+                       arc_buf_destroy(abuf, private);
+                       return;
+               }
+
+               (void) dbuf_read(db, NULL,
+                   DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
+               dbuf_rele(db, FTAG);
        }
 
        dpa->dpa_curlevel--;
-
-       nextblkid = dpa->dpa_zb.zb_blkid >>
+       uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
            (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
-       bp = ((blkptr_t *)abuf->b_data) +
+       blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
            P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
-       if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) {
+
+       if (BP_IS_HOLE(bp)) {
                kmem_free(dpa, sizeof (*dpa));
        } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
                ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
@@ -2145,6 +2931,10 @@ dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
                arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
                zbookmark_phys_t zb;
 
+               /* flag if L2ARC eligible, l2arc_noprefetch then decides */
+               if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
+                       iter_aflags |= ARC_FLAG_L2CACHE;
+
                ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
 
                SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
@@ -2155,14 +2945,16 @@ dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
                    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
                    &iter_aflags, &zb);
        }
-       (void) arc_buf_remove_ref(abuf, private);
+
+       arc_buf_destroy(abuf, private);
 }
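
Each callback invocation above peels one indirect level off the shift; a worked example, assuming epbs == 7 (16K indirect blocks with 128-byte block pointers) and a level-0 target:

	uint64_t target = 300000;	/* dpa_zb.zb_blkid at zb_level 0 */
	/* covering level-2 indirect block: 300000 >> (7 * 2) == 18   */
	/* covering level-1 indirect block: 300000 >> (7 * 1) == 2343 */
	/* read one indirect, locate the bp for the next level, descend */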
 
 /*
  * Issue prefetch reads for the given block on the given level.  If the indirect
  * blocks above that block are not in memory, we will read them in
  * asynchronously.  As a result, this call never blocks waiting for a read to
- * complete.
+ * complete. Note that the prefetch might fail if the dataset is encrypted and
+ * the encryption key is unmapped before the IO completes.
  */
 void
 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
@@ -2171,10 +2963,6 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
        blkptr_t bp;
        int epbs, nlevels, curlevel;
        uint64_t curblkid;
-       dmu_buf_impl_t *db;
-       zio_t *pio;
-       dbuf_prefetch_arg_t *dpa;
-       dsl_dataset_t *ds;
 
        ASSERT(blkid != DMU_BONUS_BLKID);
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
@@ -2197,7 +2985,7 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
        if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
                return;
 
-       db = dbuf_find(dn->dn_objset, dn->dn_object,
+       dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
            level, blkid);
        if (db != NULL) {
                mutex_exit(&db->db_mtx);
@@ -2242,20 +3030,25 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
 
        ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
 
-       pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
+       zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
            ZIO_FLAG_CANFAIL);
 
-       dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
-       ds = dn->dn_objset->os_dsl_dataset;
+       dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
+       dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
        SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
            dn->dn_object, level, blkid);
        dpa->dpa_curlevel = curlevel;
        dpa->dpa_prio = prio;
        dpa->dpa_aflags = aflags;
        dpa->dpa_spa = dn->dn_objset->os_spa;
+       dpa->dpa_dnode = dn;
        dpa->dpa_epbs = epbs;
        dpa->dpa_zio = pio;
 
+       /* flag if L2ARC eligible, l2arc_noprefetch then decides */
+       if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
+               dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
+
        /*
         * If we have the indirect just above us, no need to do the asynchronous
         * prefetch chain; we'll just run the last step ourselves.  If we're at
@@ -2271,6 +3064,10 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
                arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
                zbookmark_phys_t zb;
 
+               /* flag if L2ARC eligible, l2arc_noprefetch then decides */
+               if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
+                       iter_aflags |= ARC_FLAG_L2CACHE;
+
                SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
                    dn->dn_object, curlevel, curblkid);
                (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
@@ -2287,14 +3084,54 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
 
 #define        DBUF_HOLD_IMPL_MAX_DEPTH        20
 
+/*
+ * Helper function for dbuf_hold_impl_arg() to copy a buffer. Handles
+ * the case of encrypted, compressed and uncompressed buffers by
+ * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
+ * arc_alloc_compressed_buf() or arc_alloc_buf().
+ *
+ * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl_arg().
+ */
+noinline static void
+dbuf_hold_copy(struct dbuf_hold_arg *dh)
+{
+       dnode_t *dn = dh->dh_dn;
+       dmu_buf_impl_t *db = dh->dh_db;
+       dbuf_dirty_record_t *dr = dh->dh_dr;
+       arc_buf_t *data = dr->dt.dl.dr_data;
+
+       enum zio_compress compress_type = arc_get_compression(data);
+
+       if (arc_is_encrypted(data)) {
+               boolean_t byteorder;
+               uint8_t salt[ZIO_DATA_SALT_LEN];
+               uint8_t iv[ZIO_DATA_IV_LEN];
+               uint8_t mac[ZIO_DATA_MAC_LEN];
+
+               arc_get_raw_params(data, &byteorder, salt, iv, mac);
+               dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
+                   dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
+                   dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
+                   compress_type));
+       } else if (compress_type != ZIO_COMPRESS_OFF) {
+               dbuf_set_data(db, arc_alloc_compressed_buf(
+                   dn->dn_objset->os_spa, db, arc_buf_size(data),
+                   arc_buf_lsize(data), compress_type));
+       } else {
+               dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
+                   DBUF_GET_BUFC_TYPE(db), db->db.db_size));
+       }
+
+       bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
+}
+
 /*
  * Returns with db_holds incremented, and db_mtx not held.
  * Note: dn_struct_rwlock must be held.
  */
 static int
-__dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
+dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
 {
-       ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
        dh->dh_parent = NULL;
 
        ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
@@ -2302,7 +3139,7 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
        ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
 
        *(dh->dh_dbp) = NULL;
-top:
+
        /* dbuf_find() returns with db_mtx held */
        dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object,
            dh->dh_level, dh->dh_blkid);
@@ -2315,8 +3152,7 @@ top:
 
                ASSERT3P(dh->dh_parent, ==, NULL);
                dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
-                                       dh->dh_fail_sparse, &dh->dh_parent,
-                                       &dh->dh_bp, dh);
+                   dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp);
                if (dh->dh_fail_sparse) {
                        if (dh->dh_err == 0 &&
                            dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
@@ -2330,7 +3166,7 @@ top:
                if (dh->dh_err && dh->dh_err != ENOENT)
                        return (dh->dh_err);
                dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
-                                       dh->dh_parent, dh->dh_bp);
+                   dh->dh_parent, dh->dh_bp);
        }
 
        if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) {
@@ -2338,16 +3174,8 @@ top:
                return (SET_ERROR(ENOENT));
        }
 
-       if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
-               arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
-               if (dh->dh_db->db_buf->b_data == NULL) {
-                       dbuf_clear(dh->dh_db);
-                       if (dh->dh_parent) {
-                               dbuf_rele(dh->dh_parent, NULL);
-                               dh->dh_parent = NULL;
-                       }
-                       goto top;
-               }
+       if (dh->dh_db->db_buf != NULL) {
+               arc_buf_access(dh->dh_db->db_buf);
                ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
        }
 
@@ -2363,19 +3191,33 @@ top:
            dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
            dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
                dh->dh_dr = dh->dh_db->db_data_pending;
+               if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf)
+                       dbuf_hold_copy(dh);
+       }
+
+       if (multilist_link_active(&dh->dh_db->db_cache_link)) {
+               ASSERT(zfs_refcount_is_zero(&dh->dh_db->db_holds));
+               ASSERT(dh->dh_db->db_caching_status == DB_DBUF_CACHE ||
+                   dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE);
 
-               if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
-                       dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);
+               multilist_remove(
+                   dbuf_caches[dh->dh_db->db_caching_status].cache,
+                   dh->dh_db);
+               (void) zfs_refcount_remove_many(
+                   &dbuf_caches[dh->dh_db->db_caching_status].size,
+                   dh->dh_db->db.db_size, dh->dh_db);
 
-                       dbuf_set_data(dh->dh_db,
-                           arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
-                           dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
-                       bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
-                           dh->dh_db->db.db_data, dh->dh_db->db.db_size);
+               if (dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE) {
+                       DBUF_STAT_BUMPDOWN(metadata_cache_count);
+               } else {
+                       DBUF_STAT_BUMPDOWN(cache_levels[dh->dh_db->db_level]);
+                       DBUF_STAT_BUMPDOWN(cache_count);
+                       DBUF_STAT_DECR(cache_levels_bytes[dh->dh_db->db_level],
+                           dh->dh_db->db.db_size);
                }
+               dh->dh_db->db_caching_status = DB_NO_CACHE;
        }
-
-       (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
+       (void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
        DBUF_VERIFY(dh->dh_db);
        mutex_exit(&dh->dh_db->db_mtx);
 
@@ -2392,38 +3234,33 @@ top:
 }
 
 /*
- * The following code preserves the recursive function dbuf_hold_impl()
- * but moves the local variables AND function arguments to the heap to
- * minimize the stack frame size.  Enough space is initially allocated
- * on the stack for 20 levels of recursion.
+ * dbuf_hold_impl_arg() is called recursively, via dbuf_findbp().  There can
+ * be as many recursive calls as there are levels of on-disk indirect blocks,
+ * but typically only 0-2 recursive calls.  To minimize the stack frame size,
+ * the recursive function's arguments and "local variables" are allocated on
+ * the heap as the dbuf_hold_arg_t.
  */
 int
 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
     boolean_t fail_sparse, boolean_t fail_uncached,
     void *tag, dmu_buf_impl_t **dbp)
 {
-       struct dbuf_hold_impl_data *dh;
-       int error;
+       dbuf_hold_arg_t *dh = dbuf_hold_arg_create(dn, level, blkid,
+           fail_sparse, fail_uncached, tag, dbp);
 
-       dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
-           DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
-       __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse,
-               fail_uncached, tag, dbp, 0);
+       int error = dbuf_hold_impl_arg(dh);
 
-       error = __dbuf_hold_impl(dh);
-
-       kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
-           DBUF_HOLD_IMPL_MAX_DEPTH);
+       dbuf_hold_arg_destroy(dh);
 
        return (error);
 }
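
A generic, hedged sketch of the heap-frame pattern described in the comment above (all names here are illustrative): per-level state lives in a kmem allocation, so each recursive call adds only a small stack frame:

	typedef struct walk_arg {
		int wa_level;
		char wa_bulk[512];	/* state too large for the stack */
	} walk_arg_t;

	static int
	walk(int level)
	{
		walk_arg_t *wa = kmem_alloc(sizeof (*wa), KM_SLEEP);
		wa->wa_level = level;
		/* depth is bounded by the number of indirect levels */
		int err = (level > 0) ? walk(level - 1) : 0;
		kmem_free(wa, sizeof (*wa));
		return (err);
	}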
 
-static void
-__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
-    dnode_t *dn, uint8_t level, uint64_t blkid,
-       boolean_t fail_sparse, boolean_t fail_uncached,
-       void *tag, dmu_buf_impl_t **dbp, int depth)
+static dbuf_hold_arg_t *
+dbuf_hold_arg_create(dnode_t *dn, uint8_t level, uint64_t blkid,
+    boolean_t fail_sparse, boolean_t fail_uncached,
+    void *tag, dmu_buf_impl_t **dbp)
 {
+       dbuf_hold_arg_t *dh = kmem_alloc(sizeof (*dh), KM_SLEEP);
        dh->dh_dn = dn;
        dh->dh_level = level;
        dh->dh_blkid = blkid;
@@ -2433,7 +3270,20 @@ __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
 
        dh->dh_tag = tag;
        dh->dh_dbp = dbp;
-       dh->dh_depth = depth;
+
+       dh->dh_db = NULL;
+       dh->dh_parent = NULL;
+       dh->dh_bp = NULL;
+       dh->dh_err = 0;
+       dh->dh_dr = NULL;
+
+       return (dh);
+}
+
+static void
+dbuf_hold_arg_destroy(dbuf_hold_arg_t *dh)
+{
+       kmem_free(dh, sizeof (*dh));
 }
 
 dmu_buf_impl_t *
@@ -2492,7 +3342,8 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
 void
 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
 {
-       VERIFY(refcount_add(&db->db_holds, tag) > 1);
+       int64_t holds = zfs_refcount_add(&db->db_holds, tag);
+       VERIFY3S(holds, >, 1);
 }
 
 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
@@ -2511,7 +3362,7 @@ dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
 
        if (found_db != NULL) {
                if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
-                       (void) refcount_add(&db->db_holds, tag);
+                       (void) zfs_refcount_add(&db->db_holds, tag);
                        result = B_TRUE;
                }
                mutex_exit(&found_db->db_mtx);
@@ -2530,7 +3381,7 @@ void
 dbuf_rele(dmu_buf_impl_t *db, void *tag)
 {
        mutex_enter(&db->db_mtx);
-       dbuf_rele_and_unlock(db, tag);
+       dbuf_rele_and_unlock(db, tag, B_FALSE);
 }
 
 void
@@ -2541,10 +3392,19 @@ dmu_buf_rele(dmu_buf_t *db, void *tag)
 
 /*
  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
- * db_dirtycnt and db_holds to be updated atomically.
+ * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
+ * argument should be set if we are already in the dbuf-evicting code
+ * path, in which case we don't want to recursively evict.  This allows us to
+ * avoid deeply nested stacks that would have a call flow similar to this:
+ *
+ * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
+ *     ^                                               |
+ *     |                                               |
+ *     +-----dbuf_destroy()<--dbuf_evict_one()<--------+
  */
 void
-dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
+dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
 {
        int64_t holds;
 
@@ -2556,15 +3416,17 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
         * dnode so we can guarantee in dnode_move() that a referenced bonus
         * buffer has a corresponding dnode hold.
         */
-       holds = refcount_remove(&db->db_holds, tag);
+       holds = zfs_refcount_remove(&db->db_holds, tag);
        ASSERT(holds >= 0);
 
        /*
         * We can't freeze indirects if there is a possibility that they
         * may be modified in the current syncing context.
         */
-       if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
+       if (db->db_buf != NULL &&
+           holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
                arc_buf_freeze(db->db_buf);
+       }
 
        if (holds == db->db_dirtycnt &&
            db->db_level == 0 && db->db_user_immediate_evict)
@@ -2609,62 +3471,95 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
                         */
                        ASSERT(db->db_state == DB_UNCACHED ||
                            db->db_state == DB_NOFILL);
-                       dbuf_evict(db);
+                       dbuf_destroy(db);
                } else if (arc_released(db->db_buf)) {
-                       arc_buf_t *buf = db->db_buf;
                        /*
                         * This dbuf has anonymous data associated with it.
                         */
-                       dbuf_clear_data(db);
-                       VERIFY(arc_buf_remove_ref(buf, db));
-                       dbuf_evict(db);
+                       dbuf_destroy(db);
                } else {
-                       VERIFY(!arc_buf_remove_ref(db->db_buf, db));
+                       boolean_t do_arc_evict = B_FALSE;
+                       blkptr_t bp;
+                       spa_t *spa = dmu_objset_spa(db->db_objset);
+
+                       if (!DBUF_IS_CACHEABLE(db) &&
+                           db->db_blkptr != NULL &&
+                           !BP_IS_HOLE(db->db_blkptr) &&
+                           !BP_IS_EMBEDDED(db->db_blkptr)) {
+                               do_arc_evict = B_TRUE;
+                               bp = *db->db_blkptr;
+                       }
 
-                       /*
-                        * A dbuf will be eligible for eviction if either the
-                        * 'primarycache' property is set or a duplicate
-                        * copy of this buffer is already cached in the arc.
-                        *
-                        * In the case of the 'primarycache' a buffer
-                        * is considered for eviction if it matches the
-                        * criteria set in the property.
-                        *
-                        * To decide if our buffer is considered a
-                        * duplicate, we must call into the arc to determine
-                        * if multiple buffers are referencing the same
-                        * block on-disk. If so, then we simply evict
-                        * ourselves.
-                        */
-                       if (!DBUF_IS_CACHEABLE(db)) {
-                               if (db->db_blkptr != NULL &&
-                                   !BP_IS_HOLE(db->db_blkptr) &&
-                                   !BP_IS_EMBEDDED(db->db_blkptr)) {
-                                       spa_t *spa =
-                                           dmu_objset_spa(db->db_objset);
-                                       blkptr_t bp = *db->db_blkptr;
-                                       dbuf_clear(db);
-                                       arc_freed(spa, &bp);
+                       if (!DBUF_IS_CACHEABLE(db) ||
+                           db->db_pending_evict) {
+                               dbuf_destroy(db);
+                       } else if (!multilist_link_active(&db->db_cache_link)) {
+                               ASSERT3U(db->db_caching_status, ==,
+                                   DB_NO_CACHE);
+
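+                               /*
+                                * This was the last hold, so cache the
+                                * dbuf: metadata that qualifies goes to
+                                * the metadata cache, everything else
+                                * to the dbuf cache.
+                                */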
+                               dbuf_cached_state_t dcs =
+                                   dbuf_include_in_metadata_cache(db) ?
+                                   DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
+                               db->db_caching_status = dcs;
+
+                               multilist_insert(dbuf_caches[dcs].cache, db);
+                               (void) zfs_refcount_add_many(
+                                   &dbuf_caches[dcs].size,
+                                   db->db.db_size, db);
+
+                               if (dcs == DB_DBUF_METADATA_CACHE) {
+                                       DBUF_STAT_BUMP(metadata_cache_count);
+                                       DBUF_STAT_MAX(
+                                           metadata_cache_size_bytes_max,
+                                           zfs_refcount_count(
+                                           &dbuf_caches[dcs].size));
                                } else {
-                                       dbuf_clear(db);
+                                       DBUF_STAT_BUMP(
+                                           cache_levels[db->db_level]);
+                                       DBUF_STAT_BUMP(cache_count);
+                                       DBUF_STAT_INCR(
+                                           cache_levels_bytes[db->db_level],
+                                           db->db.db_size);
+                                       DBUF_STAT_MAX(cache_size_bytes_max,
+                                           zfs_refcount_count(
+                                           &dbuf_caches[dcs].size));
                                }
-                       } else if (db->db_pending_evict ||
-                           arc_buf_eviction_needed(db->db_buf)) {
-                               dbuf_clear(db);
-                       } else {
                                mutex_exit(&db->db_mtx);
+
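+                               /*
+                                * Only the dbuf cache is evicted back
+                                * toward its target size; skip the
+                                * notification when we are already on
+                                * the eviction path.
+                                */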
+                               if (db->db_caching_status == DB_DBUF_CACHE &&
+                                   !evicting) {
+                                       dbuf_evict_notify();
+                               }
                        }
+
+                       if (do_arc_evict)
+                               arc_freed(spa, &bp);
                }
        } else {
                mutex_exit(&db->db_mtx);
        }
 }
 
 #pragma weak dmu_buf_refcount = dbuf_refcount
 uint64_t
 dbuf_refcount(dmu_buf_impl_t *db)
 {
-       return (refcount_count(&db->db_holds));
+       return (zfs_refcount_count(&db->db_holds));
+}
+
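+/*
+ * The number of holds taken by users of this dbuf, i.e. the total
+ * holds less the holds taken internally by dirty records.
+ */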
+uint64_t
+dmu_buf_user_refcount(dmu_buf_t *db_fake)
+{
+       uint64_t holds;
+       dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+
+       mutex_enter(&db->db_mtx);
+       ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
+       holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
+       mutex_exit(&db->db_mtx);
+
+       return (holds);
 }
 
 void *
@@ -2721,24 +3616,33 @@ dmu_buf_user_evict_wait()
        taskq_wait(dbu_evict_taskq);
 }
 
-boolean_t
-dmu_buf_freeable(dmu_buf_t *dbuf)
+blkptr_t *
+dmu_buf_get_blkptr(dmu_buf_t *db)
 {
-       boolean_t res = B_FALSE;
-       dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
+       dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
+       return (dbi->db_blkptr);
+}
 
-       if (db->db_blkptr)
-               res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
-                   db->db_blkptr, db->db_blkptr->blk_birth);
+objset_t *
+dmu_buf_get_objset(dmu_buf_t *db)
+{
+       dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
+       return (dbi->db_objset);
+}
 
-       return (res);
+dnode_t *
+dmu_buf_dnode_enter(dmu_buf_t *db)
+{
+       dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
+       DB_DNODE_ENTER(dbi);
+       return (DB_DNODE(dbi));
 }
 
-blkptr_t *
-dmu_buf_get_blkptr(dmu_buf_t *db)
+void
+dmu_buf_dnode_exit(dmu_buf_t *db)
 {
        dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
-       return (dbi->db_blkptr);
+       DB_DNODE_EXIT(dbi);
 }
 
 static void
@@ -2751,7 +3655,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
                return;
 
        if (db->db_blkid == DMU_SPILL_BLKID) {
-               db->db_blkptr = &dn->dn_phys->dn_spill;
+               db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
                BP_ZERO(db->db_blkptr);
                return;
        }
@@ -2786,6 +3690,48 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
        }
 }
 
+/*
+ * When syncing out a block of dnodes, adjust the block to deal with
+ * encryption.  Normally, we make sure the block is decrypted before writing
+ * it.  If we have crypt params, then we are writing a raw (encrypted) block
+ * from a raw receive.  In this case, set the ARC buf's crypt params so
+ * that the BP will be filled with the correct byteorder, salt, iv, and mac.
+ */
+static void
+dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
+{
+       int err;
+       dmu_buf_impl_t *db = dr->dr_dbuf;
+
+       ASSERT(MUTEX_HELD(&db->db_mtx));
+       ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
+       ASSERT3U(db->db_level, ==, 0);
+
+       if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
+               zbookmark_phys_t zb;
+
+               /*
+                * Unfortunately, there is currently no mechanism for
+                * syncing context to handle decryption errors. An error
+                * here is only possible if an attacker maliciously
+                * changed a dnode block and updated the associated
+                * checksums going up the block tree.
+                */
+               SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
+                   db->db.db_object, db->db_level, db->db_blkid);
+               err = arc_untransform(db->db_buf, db->db_objset->os_spa,
+                   &zb, B_TRUE);
+               if (err)
+                       panic("Invalid dnode block MAC");
+       } else if (dr->dt.dl.dr_has_raw_params) {
+               (void) arc_release(dr->dt.dl.dr_data, db);
+               arc_convert_to_raw(dr->dt.dl.dr_data,
+                   dmu_objset_id(db->db_objset),
+                   dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
+                   dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
+       }
+}
+
 /*
  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
  * is critical that we not allow the compiler to inline this function into
@@ -2827,6 +3773,7 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
        db->db_data_pending = dr;
 
        mutex_exit(&db->db_mtx);
+
        dbuf_write(dr, db->db_buf, tx);
 
        zio = dr->dr_zio;
@@ -2876,6 +3823,22 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 
        if (db->db_blkid == DMU_SPILL_BLKID) {
                mutex_enter(&dn->dn_mtx);
+               if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
+                       /*
+                        * In the previous transaction group, the bonus buffer
+                        * was entirely used to store the attributes for the
+                        * dnode which overrode the dn_spill field.  However,
+                        * when adding more attributes to the file a spill
+                        * block was required to hold the extra attributes.
+                        *
+                        * Make sure to clear the garbage left in the dn_spill
+                        * field from the previous attributes in the bonus
+                        * buffer.  Otherwise, after writing out the spill
+                        * block to the newly allocated dva, it will free
+                        * the old block pointed to by the invalid dn_spill.
+                        */
+                       db->db_blkptr = NULL;
+               }
                dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
                mutex_exit(&dn->dn_mtx);
        }
@@ -2891,13 +3854,17 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 
                ASSERT(*datap != NULL);
                ASSERT0(db->db_level);
-               ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
-               bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
+               ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
+                   DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
+               bcopy(*datap, DN_BONUS(dn->dn_phys),
+                   DN_MAX_BONUS_LEN(dn->dn_phys));
                DB_DNODE_EXIT(db);
 
                if (*datap != db->db.db_data) {
-                       zio_buf_free(*datap, DN_MAX_BONUSLEN);
-                       arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
+                       int slots = DB_DNODE(db)->dn_num_slots;
+                       int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
+                       kmem_free(*datap, bonuslen);
+                       arc_space_return(bonuslen, ARC_SPACE_BONUS);
                }
                db->db_data_pending = NULL;
                drp = &db->db_last_dirty;
@@ -2913,7 +3880,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
                kmem_free(dr, sizeof (dbuf_dirty_record_t));
                ASSERT(db->db_dirtycnt > 0);
                db->db_dirtycnt -= 1;
-               dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
+               dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
                return;
        }
 
@@ -2937,9 +3904,16 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
                ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
        }
 
+       /*
+        * If this is a dnode block, ensure it is appropriately encrypted
+        * or decrypted, depending on what we are writing to it this txg.
+        */
+       if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
+               dbuf_prepare_encrypted_dnode_leaf(dr);
+
        if (db->db_state != DB_NOFILL &&
            dn->dn_object != DMU_META_DNODE_OBJECT &&
-           refcount_count(&db->db_holds) > 1 &&
+           zfs_refcount_count(&db->db_holds) > 1 &&
            dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
            *datap == db->db_buf) {
                /*
@@ -2953,10 +3927,29 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
                 * objects only modified in the syncing context (e.g.
                 * DMU_OT_DNODE blocks).
                 */
-               int blksz = arc_buf_size(*datap);
+               int psize = arc_buf_size(*datap);
+               int lsize = arc_buf_lsize(*datap);
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
-               *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
-               bcopy(db->db.db_data, (*datap)->b_data, blksz);
+               enum zio_compress compress_type = arc_get_compression(*datap);
+
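+               /*
+                * Allocate a replacement buffer that preserves the
+                * original's raw-encryption or compression parameters,
+                * so the copied data remains valid as written.
+                */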
+               if (arc_is_encrypted(*datap)) {
+                       boolean_t byteorder;
+                       uint8_t salt[ZIO_DATA_SALT_LEN];
+                       uint8_t iv[ZIO_DATA_IV_LEN];
+                       uint8_t mac[ZIO_DATA_MAC_LEN];
+
+                       arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
+                       *datap = arc_alloc_raw_buf(os->os_spa, db,
+                           dmu_objset_id(os), byteorder, salt, iv, mac,
+                           dn->dn_type, psize, lsize, compress_type);
+               } else if (compress_type != ZIO_COMPRESS_OFF) {
+                       ASSERT3U(type, ==, ARC_BUFC_DATA);
+                       *datap = arc_alloc_compressed_buf(os->os_spa, db,
+                           psize, lsize, compress_type);
+               } else {
+                       *datap = arc_alloc_buf(os->os_spa, db, type, psize);
+               }
+               bcopy(db->db.db_data, (*datap)->b_data, psize);
        }
        db->db_data_pending = dr;
 
@@ -2966,7 +3959,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 
        ASSERT(!list_link_active(&dr->dr_dirty_node));
        if (dn->dn_object == DMU_META_DNODE_OBJECT) {
-               list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
+               list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
                DB_DNODE_EXIT(db);
        } else {
                /*
@@ -3048,23 +4041,31 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
        if (db->db_blkid == DMU_SPILL_BLKID) {
                ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
                ASSERT(!(BP_IS_HOLE(bp)) &&
-                   db->db_blkptr == &dn->dn_phys->dn_spill);
+                   db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
        }
 #endif
 
        if (db->db_level == 0) {
                mutex_enter(&dn->dn_mtx);
                if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
-                   db->db_blkid != DMU_SPILL_BLKID)
+                   db->db_blkid != DMU_SPILL_BLKID) {
+                       ASSERT0(db->db_objset->os_raw_receive);
                        dn->dn_phys->dn_maxblkid = db->db_blkid;
+               }
                mutex_exit(&dn->dn_mtx);
 
                if (dn->dn_type == DMU_OT_DNODE) {
-                       dnode_phys_t *dnp = db->db.db_data;
-                       for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
-                           i--, dnp++) {
-                               if (dnp->dn_type != DMU_OT_NONE)
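+                       /*
+                        * Count the allocated dnodes in the block,
+                        * stepping over the extra slots consumed by
+                        * large dnodes.
+                        */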
+                       i = 0;
+                       while (i < db->db.db_size) {
+                               dnode_phys_t *dnp =
+                                   (void *)(((char *)db->db.db_data) + i);
+
+                               i += DNODE_MIN_SIZE;
+                               if (dnp->dn_type != DMU_OT_NONE) {
                                        fill++;
+                                       i += dnp->dn_extra_slots *
+                                           DNODE_MIN_SIZE;
+                               }
                        }
                } else {
                        if (BP_IS_HOLE(bp)) {
@@ -3085,7 +4086,7 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
        DB_DNODE_EXIT(db);
 
        if (!BP_IS_EMBEDDED(bp))
-               bp->blk_fill = fill;
+               BP_SET_FILL(bp, fill);
 
        mutex_exit(&db->db_mtx);
 
@@ -3094,6 +4095,51 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
        rw_exit(&dn->dn_struct_rwlock);
 }
 
+/* ARGSUSED */
+/*
+ * This function gets called just prior to running through the compression
+ * stage of the zio pipeline. If we're an indirect block composed of only
+ * holes, then we want this indirect to be compressed away to a hole. In
+ * order to do that we must zero out any information about the holes that
+ * this indirect points to before we try to compress it.
+ */
+static void
+dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
+{
+       dmu_buf_impl_t *db = vdb;
+       dnode_t *dn;
+       blkptr_t *bp;
+       unsigned int epbs, i;
+
+       ASSERT3U(db->db_level, >, 0);
+       DB_DNODE_ENTER(db);
+       dn = DB_DNODE(db);
+       epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
+       ASSERT3U(epbs, <, 31);
+
+       /* Determine if all our children are holes */
+       for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
+               if (!BP_IS_HOLE(bp))
+                       break;
+       }
+
+       /*
+        * If all the children are holes, then zero them all out so that
+        * the block may be compressed away.
+        */
+       if (i == 1ULL << epbs) {
+               /*
+                * We only found holes. Grab the rwlock to prevent
+                * anybody from reading the blocks we're about to
+                * zero out.
+                */
+               rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
+               bzero(db->db.db_data, db->db.db_size);
+               rw_exit(&dn->dn_struct_rwlock);
+       }
+       DB_DNODE_EXIT(db);
+}
+
 /*
  * The SPA will call this callback several times for each zio - once
  * for every physical child i/o (zio->io_phys_children times).  This
@@ -3172,7 +4218,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
                dn = DB_DNODE(db);
                ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
                ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
-                   db->db_blkptr == &dn->dn_phys->dn_spill);
+                   db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
                DB_DNODE_EXIT(db);
        }
 #endif
@@ -3182,10 +4228,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
                ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
                if (db->db_state != DB_NOFILL) {
                        if (dr->dt.dl.dr_data != db->db_buf)
-                               VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
-                                   db));
-                       else if (!arc_released(db->db_buf))
-                               arc_set_callback(db->db_buf, dbuf_do_evict, db);
+                               arc_buf_destroy(dr->dt.dl.dr_data, db);
                }
        } else {
                dnode_t *dn;
@@ -3201,8 +4244,6 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
                            dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
                        ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
                            db->db.db_size);
-                       if (!arc_released(db->db_buf))
-                               arc_set_callback(db->db_buf, dbuf_do_evict, db);
                }
                DB_DNODE_EXIT(db);
                mutex_destroy(&dr->dt.di.dr_mtx);
@@ -3214,7 +4255,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
        ASSERT(db->db_dirtycnt > 0);
        db->db_dirtycnt -= 1;
        db->db_data_pending = NULL;
-       dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
+       dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
 }
 
 static void
@@ -3254,8 +4295,147 @@ dbuf_write_override_done(zio_t *zio)
        mutex_exit(&db->db_mtx);
 
        dbuf_write_done(zio, NULL, db);
+
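+       /* Release the ABD borrowed for this override write. */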
+       if (zio->io_abd != NULL)
+               abd_put(zio->io_abd);
 }
 
+typedef struct dbuf_remap_impl_callback_arg {
+       objset_t        *drica_os;
+       uint64_t        drica_blk_birth;
+       dmu_tx_t        *drica_tx;
+} dbuf_remap_impl_callback_arg_t;
+
+static void
+dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
+    void *arg)
+{
+       dbuf_remap_impl_callback_arg_t *drica = arg;
+       objset_t *os = drica->drica_os;
+       spa_t *spa = dmu_objset_spa(os);
+       dmu_tx_t *tx = drica->drica_tx;
+
+       ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
+
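+       /*
+        * MOS blocks are marked obsolete on the indirect vdev directly;
+        * blocks belonging to a dataset are recorded against that
+        * dataset so the remap can be accounted for there.
+        */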
+       if (os == spa_meta_objset(spa)) {
+               spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
+       } else {
+               dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
+                   size, drica->drica_blk_birth, tx);
+       }
+}
+
+static void
+dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx)
+{
+       blkptr_t bp_copy = *bp;
+       spa_t *spa = dmu_objset_spa(dn->dn_objset);
+       dbuf_remap_impl_callback_arg_t drica;
+
+       ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
+
+       drica.drica_os = dn->dn_objset;
+       drica.drica_blk_birth = bp->blk_birth;
+       drica.drica_tx = tx;
+       if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
+           &drica)) {
+               /*
+                * The struct_rwlock prevents dbuf_read_impl() from
+                * dereferencing the BP while we are changing it.  To
+                * avoid lock contention, only grab it when we are actually
+                * changing the BP.
+                */
+               rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
+               *bp = bp_copy;
+               rw_exit(&dn->dn_struct_rwlock);
+       }
+}
+
+/*
+ * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
+ * to remap a copy of every bp in the dbuf.
+ */
+boolean_t
+dbuf_can_remap(const dmu_buf_impl_t *db)
+{
+       spa_t *spa = dmu_objset_spa(db->db_objset);
+       blkptr_t *bp = db->db.db_data;
+       boolean_t ret = B_FALSE;
+
+       ASSERT3U(db->db_level, >, 0);
+       ASSERT3S(db->db_state, ==, DB_CACHED);
+
+       ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+       spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+       for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
+               blkptr_t bp_copy = bp[i];
+               if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
+                       ret = B_TRUE;
+                       break;
+               }
+       }
+       spa_config_exit(spa, SCL_VDEV, FTAG);
+
+       return (ret);
+}
+
+boolean_t
+dnode_needs_remap(const dnode_t *dn)
+{
+       spa_t *spa = dmu_objset_spa(dn->dn_objset);
+       boolean_t ret = B_FALSE;
+
+       if (dn->dn_phys->dn_nlevels == 0) {
+               return (B_FALSE);
+       }
+
+       ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+       spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+       for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
+               blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
+               if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
+                       ret = B_TRUE;
+                       break;
+               }
+       }
+       spa_config_exit(spa, SCL_VDEV, FTAG);
+
+       return (ret);
+}
+
+/*
+ * Remap any existing BPs to concrete vdevs, if possible.
+ */
+static void
+dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
+{
+       spa_t *spa = dmu_objset_spa(db->db_objset);
+       ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
+
+       if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
+               return;
+
+       if (db->db_level > 0) {
+               blkptr_t *bp = db->db.db_data;
+               for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
+                       dbuf_remap_impl(dn, &bp[i], tx);
+               }
+       } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
+               dnode_phys_t *dnp = db->db.db_data;
+               ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
+                   DMU_OT_DNODE);
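+               /*
+                * Remap every block pointer of every dnode in the
+                * block, advancing by each dnode's full slot count.
+                */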
+               for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
+                   i += dnp[i].dn_extra_slots + 1) {
+                       for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
+                               dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx);
+                       }
+               }
+       }
+}
+
 /* Issue I/O to commit a dirty buffer to disk. */
 static void
 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
@@ -3289,6 +4469,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
                        } else {
                                dbuf_release_bp(db);
                        }
+                       dbuf_remap(dn, db, tx);
                }
        }
 
@@ -3344,11 +4525,13 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
                 * The BP for this block has been provided by open context
                 * (by dmu_sync() or dmu_buf_write_embedded()).
                 */
-               void *contents = (data != NULL) ? data->b_data : NULL;
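+               /*
+                * Wrap the existing buffer in a borrowed ABD; it is
+                * released in dbuf_write_override_done().
+                */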
+               abd_t *contents = (data != NULL) ?
+                   abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
 
                dr->dr_zio = zio_write(zio, os->os_spa, txg,
-                   &dr->dr_bp_copy, contents, db->db.db_size, &zp,
-                   dbuf_write_override_ready, NULL, dbuf_write_override_done,
+                   &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size,
+                   &zp, dbuf_write_override_ready, NULL, NULL,
+                   dbuf_write_override_done,
                    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
                mutex_enter(&db->db_mtx);
                dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
@@ -3356,26 +4539,39 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
                    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
                mutex_exit(&db->db_mtx);
        } else if (db->db_state == DB_NOFILL) {
-               ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
+               ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
+                   zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
                dr->dr_zio = zio_write(zio, os->os_spa, txg,
-                   &dr->dr_bp_copy, NULL, db->db.db_size, &zp,
-                   dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
+                   &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
+                   dbuf_write_nofill_ready, NULL, NULL,
+                   dbuf_write_nofill_done, db,
                    ZIO_PRIORITY_ASYNC_WRITE,
                    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
        } else {
                ASSERT(arc_released(data));
+
+               /*
+                * For indirect blocks, we want to setup the children
+                * ready callback so that we can properly handle an indirect
+                * block that only contains holes.
+                */
+               arc_write_done_func_t *children_ready_cb = NULL;
+               if (db->db_level != 0)
+                       children_ready_cb = dbuf_write_children_ready;
+
                dr->dr_zio = arc_write(zio, os->os_spa, txg,
                    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
-                   DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
-                   dbuf_write_physdone, dbuf_write_done, db,
-                   ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
+                   &zp, dbuf_write_ready,
+                   children_ready_cb, dbuf_write_physdone,
+                   dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
+                   ZIO_FLAG_MUSTSUCCEED, &zb);
        }
 }
 
-#if defined(_KERNEL) && defined(HAVE_SPL)
+#if defined(_KERNEL)
 EXPORT_SYMBOL(dbuf_find);
 EXPORT_SYMBOL(dbuf_is_metadata);
-EXPORT_SYMBOL(dbuf_evict);
+EXPORT_SYMBOL(dbuf_destroy);
 EXPORT_SYMBOL(dbuf_loan_arcbuf);
 EXPORT_SYMBOL(dbuf_whichblock);
 EXPORT_SYMBOL(dbuf_read);
@@ -3384,13 +4580,14 @@ EXPORT_SYMBOL(dbuf_free_range);
 EXPORT_SYMBOL(dbuf_new_size);
 EXPORT_SYMBOL(dbuf_release_bp);
 EXPORT_SYMBOL(dbuf_dirty);
+EXPORT_SYMBOL(dmu_buf_set_crypt_params);
 EXPORT_SYMBOL(dmu_buf_will_dirty);
+EXPORT_SYMBOL(dmu_buf_is_dirty);
 EXPORT_SYMBOL(dmu_buf_will_not_fill);
 EXPORT_SYMBOL(dmu_buf_will_fill);
 EXPORT_SYMBOL(dmu_buf_fill_done);
 EXPORT_SYMBOL(dmu_buf_rele);
 EXPORT_SYMBOL(dbuf_assign_arcbuf);
-EXPORT_SYMBOL(dbuf_clear);
 EXPORT_SYMBOL(dbuf_prefetch);
 EXPORT_SYMBOL(dbuf_hold_impl);
 EXPORT_SYMBOL(dbuf_hold);
@@ -3406,6 +4603,34 @@ EXPORT_SYMBOL(dbuf_sync_list);
 EXPORT_SYMBOL(dmu_buf_set_user);
 EXPORT_SYMBOL(dmu_buf_set_user_ie);
 EXPORT_SYMBOL(dmu_buf_get_user);
-EXPORT_SYMBOL(dmu_buf_freeable);
 EXPORT_SYMBOL(dmu_buf_get_blkptr);
+
+/* BEGIN CSTYLED */
+module_param(dbuf_cache_max_bytes, ulong, 0644);
+MODULE_PARM_DESC(dbuf_cache_max_bytes,
+       "Maximum size in bytes of the dbuf cache.");
+
+module_param(dbuf_cache_hiwater_pct, uint, 0644);
+MODULE_PARM_DESC(dbuf_cache_hiwater_pct,
+       "Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
+       "directly.");
+
+module_param(dbuf_cache_lowater_pct, uint, 0644);
+MODULE_PARM_DESC(dbuf_cache_lowater_pct,
+       "Percentage below dbuf_cache_max_bytes when the evict thread stops "
+       "evicting dbufs.");
+
+module_param(dbuf_metadata_cache_max_bytes, ulong, 0644);
+MODULE_PARM_DESC(dbuf_metadata_cache_max_bytes,
+       "Maximum size in bytes of the dbuf metadata cache.");
+
+module_param(dbuf_cache_shift, int, 0644);
+MODULE_PARM_DESC(dbuf_cache_shift,
+       "Set the size of the dbuf cache to a log2 fraction of arc size.");
+
+module_param(dbuf_metadata_cache_shift, int, 0644);
+MODULE_PARM_DESC(dbuf_metadata_cache_shift,
+       "Set the size of the dbuf metadata cache to a log2 fraction of "
+       "arc size.");
+/* END CSTYLED */
 #endif