Encryption patch follow-up
[mirror_zfs.git] / module / zfs / arc.c
index 805e7b59f5f7ec120688874e9270edff091d9d6d..1329e8e83c5b19a75cca40cee4cb0d3c47cc87b4 100644 (file)
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
- * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
- * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
+ * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  */
 
 /*
  * A new reference to a cache buffer can be obtained in two
  * ways: 1) via a hash table lookup using the DVA as a key,
  * or 2) via one of the ARC lists.  The arc_read() interface
- * uses method 1, while the internal arc algorithms for
+ * uses method 1, while the internal ARC algorithms for
  * adjusting the cache use method 2.  We therefore provide two
  * types of locks: 1) the hash table lock array, and 2) the
- * arc list locks.
+ * ARC list locks.
  *
  * Buffers do not have their own mutexes, rather they rely on the
  * hash table mutexes for the bulk of their protection (i.e. most
  * buf_hash_remove() expects the appropriate hash mutex to be
  * already held before it is invoked.
  *
- * Each arc state also has a mutex which is used to protect the
+ * Each ARC state also has a mutex which is used to protect the
  * buffer list associated with the state.  When attempting to
- * obtain a hash table lock while holding an arc list lock you
+ * obtain a hash table lock while holding an ARC list lock you
  * must use: mutex_tryenter() to avoid deadlock.  Also note that
  * the active state mutex must be held before the ghost state mutex.
  *
- * Arc buffers may have an associated eviction callback function.
- * This function will be invoked prior to removing the buffer (e.g.
- * in arc_do_user_evicts()).  Note however that the data associated
- * with the buffer may be evicted prior to the callback.  The callback
- * must be made with *no locks held* (to prevent deadlock).  Additionally,
- * the users of callbacks must ensure that their private data is
- * protected from simultaneous callbacks from arc_clear_callback()
- * and arc_do_user_evicts().
- *
  * It is also possible to register a callback which is run when the
  * arc_meta_limit is reached and no buffers can be safely evicted.  In
  * this case the arc user should drop a reference on some arc buffers so
  *     - ARC header release, as it removes from L2ARC buflists
  */
 
+/*
+ * ARC operation:
+ *
+ * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
+ * This structure can point either to a block that is still in the cache or to
+ * one that is only accessible in an L2 ARC device, or it can provide
+ * information about a block that was recently evicted. If a block is
+ * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
+ * information to retrieve it from the L2ARC device. This information is
+ * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. The data
+ * of a block in this state cannot be accessed directly.
+ *
+ * Blocks that are actively being referenced or have not been evicted
+ * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
+ * the arc_buf_hdr_t that will point to the data block in memory. A block can
+ * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
+ * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
+ * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
+ *
+ * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
+ * ability to store the physical data (b_pabd) associated with the DVA of the
+ * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
+ * it will match its on-disk compression characteristics. This behavior can be
+ * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
+ * compressed ARC functionality is disabled, the b_pabd will point to an
+ * uncompressed version of the on-disk data.
+ *
+ * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
+ * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
+ * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
+ * consumer. The ARC will provide references to this data and will keep it
+ * cached until it is no longer in use. The ARC caches only the L1ARC's physical
+ * data block and will evict any arc_buf_t that is no longer referenced. The
+ * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
+ * "overhead_size" kstat.
+ *
+ * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
+ * compressed form. The typical case is that consumers will want uncompressed
+ * data, and when that happens a new data buffer is allocated where the data is
+ * decompressed for them to use. Currently the only consumer who wants
+ * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
+ * exists on disk. When this happens, the arc_buf_t's data buffer is shared
+ * with the arc_buf_hdr_t.
+ *
+ * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
+ * first one is owned by a compressed send consumer (and therefore references
+ * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
+ * used by any other consumer (and has its own uncompressed copy of the data
+ * buffer).
+ *
+ *   arc_buf_hdr_t
+ *   +-----------+
+ *   | fields    |
+ *   | common to |
+ *   | L1- and   |
+ *   | L2ARC     |
+ *   +-----------+
+ *   | l2arc_buf_hdr_t
+ *   |           |
+ *   +-----------+
+ *   | l1arc_buf_hdr_t
+ *   |           |              arc_buf_t
+ *   | b_buf     +------------>+-----------+      arc_buf_t
+ *   | b_pabd    +-+           |b_next     +---->+-----------+
+ *   +-----------+ |           |-----------|     |b_next     +-->NULL
+ *                 |           |b_comp = T |     +-----------+
+ *                 |           |b_data     +-+   |b_comp = F |
+ *                 |           +-----------+ |   |b_data     +-+
+ *                 +->+------+               |   +-----------+ |
+ *        compressed  |      |               |                 |
+ *           data     |      |<--------------+                 | uncompressed
+ *                    +------+          compressed,            |     data
+ *                                        shared               +-->+------+
+ *                                         data                    |      |
+ *                                                                 |      |
+ *                                                                 +------+
+ *
+ * When a consumer reads a block, the ARC must first look to see if the
+ * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
+ * arc_buf_t and either copies uncompressed data into a new data buffer from an
+ * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
+ * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
+ * hdr is compressed and the desired compression characteristics of the
+ * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
+ * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
+ * the last buffer in the hdr's b_buf list; however, a shared compressed buf can
+ * be anywhere in the hdr's list.
+ *
+ * The diagram below shows an example of an uncompressed ARC hdr that is
+ * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
+ * the last element in the buf list):
+ *
+ *                arc_buf_hdr_t
+ *                +-----------+
+ *                |           |
+ *                |           |
+ *                |           |
+ *                +-----------+
+ * l2arc_buf_hdr_t|           |
+ *                |           |
+ *                +-----------+
+ * l1arc_buf_hdr_t|           |
+ *                |           |                 arc_buf_t    (shared)
+ *                |    b_buf  +------------>+---------+      arc_buf_t
+ *                |           |             |b_next   +---->+---------+
+ *                |  b_pabd   +-+           |---------|     |b_next   +-->NULL
+ *                +-----------+ |           |         |     +---------+
+ *                              |           |b_data   +-+   |         |
+ *                              |           +---------+ |   |b_data   +-+
+ *                              +->+------+             |   +---------+ |
+ *                                 |      |             |               |
+ *                   uncompressed  |      |             |               |
+ *                        data     +------+             |               |
+ *                                    ^                 +->+------+     |
+ *                                    |       uncompressed |      |     |
+ *                                    |           data     |      |     |
+ *                                    |                    +------+     |
+ *                                    +---------------------------------+
+ *
+ * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
+ * since the physical block is about to be rewritten. The new data contents
+ * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
+ * it may compress the data before writing it to disk. The ARC will be called
+ * with the transformed data and will bcopy the transformed on-disk block into
+ * a newly allocated b_pabd. Writes are always done into buffers which have
+ * either been loaned (and hence are new and don't have other readers) or
+ * buffers which have been released (and hence have their own hdr, if there
+ * were originally other readers of the buf's original hdr). This ensures that
+ * the ARC only needs to update a single buf and its hdr after a write occurs.
+ *
+ * When the L2ARC is in use, it will also take advantage of the b_pabd. The
+ * L2ARC will always write the contents of b_pabd to the L2ARC. This means
+ * that when compressed ARC is enabled the L2ARC blocks are identical
+ * to the on-disk blocks in the main data pool. This provides a significant
+ * advantage since the ARC can leverage the bp's checksum when reading from the
+ * L2ARC to determine if the contents are valid. However, if the compressed
+ * ARC is disabled, then the L2ARC's block must be transformed to look
+ * like the physical block in the main data pool before comparing the
+ * checksum and determining its validity.
+ *
+ * The L1ARC has a slightly different system for storing encrypted data.
+ * Raw (encrypted + possibly compressed) data has a few subtle differences from
+ * data that is just compressed. The biggest difference is that it is not
+ * possible to decrypt encrypted data (or vice versa) if the keys aren't loaded.
+ * The other difference is that encryption cannot be treated as a suggestion.
+ * If a caller would prefer compressed data but winds up with uncompressed
+ * data, the worst that can happen is a performance hit. If the caller
+ * requests encrypted data, however, we must be
+ * sure they actually get it or else secret information could be leaked. Raw
+ * data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
+ * may have both an encrypted version and a decrypted version of its data at
+ * once. When a caller needs a raw arc_buf_t, it is allocated and the data is
+ * copied out of this header. To avoid complications with b_pabd, raw buffers
+ * cannot be shared.
+ */
+
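To make the diagrams above concrete, here is a small, self-contained userland sketch of the relationship they describe. The ex_hdr_t and ex_buf_t types below are hypothetical stand-ins, not the real arc_buf_hdr_t/arc_buf_t definitions (which carry state, flags, refcounts, locks, and ABD-based buffers); the sketch only models the first diagram, where a compressed buf shares the hdr's copy of the on-disk block and a second, uncompressed buf holds its own private copy.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for arc_buf_t and arc_buf_hdr_t. */
typedef struct ex_buf {
	struct ex_buf	*b_next;	/* next consumer buf on this hdr */
	void		*b_data;	/* data as this consumer sees it */
	bool		b_compressed;	/* still in on-disk (compressed) form */
} ex_buf_t;

typedef struct ex_hdr {
	void		*b_pabd;	/* copy of the on-disk physical block */
	void		*b_rabd;	/* raw (encrypted) copy, if protected */
	ex_buf_t	*b_buf;		/* head of the consumer buf list */
} ex_hdr_t;

int
main(void)
{
	char ondisk[64];	/* stands in for the compressed b_pabd */
	char plain[128];	/* private uncompressed copy for a consumer */

	/* The scenario from the first diagram: two bufs on one hdr. */
	ex_buf_t uncomp = { NULL, plain, false };
	ex_buf_t comp = { &uncomp, ondisk, true };	/* shares b_pabd */
	ex_hdr_t hdr = { ondisk, NULL, &comp };

	for (ex_buf_t *b = hdr.b_buf; b != NULL; b = b->b_next) {
		printf("buf: %s, %s the hdr's b_pabd\n",
		    b->b_compressed ? "compressed" : "uncompressed",
		    b->b_data == hdr.b_pabd ? "shares" : "does not share");
	}
	return (0);
}

As noted above, an uncompressed shared buf would have to be the last element of the b_buf list, while a compressed shared buf can sit anywhere on it, and raw (encrypted) data in b_rabd is never shared with an arc_buf_t.
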
 #include <sys/spa.h>
 #include <sys/zio.h>
+#include <sys/spa_impl.h>
 #include <sys/zio_compress.h>
+#include <sys/zio_checksum.h>
 #include <sys/zfs_context.h>
 #include <sys/arc.h>
+#include <sys/refcount.h>
 #include <sys/vdev.h>
 #include <sys/vdev_impl.h>
 #include <sys/dsl_pool.h>
+#include <sys/zio_checksum.h>
 #include <sys/multilist.h>
+#include <sys/abd.h>
+#include <sys/zil.h>
+#include <sys/fm/fs/zfs.h>
 #ifdef _KERNEL
 #include <sys/vmsystm.h>
 #include <vm/anon.h>
@@ -160,21 +315,6 @@ static kcondvar_t  arc_reclaim_thread_cv;
 static boolean_t       arc_reclaim_thread_exit;
 static kcondvar_t      arc_reclaim_waiters_cv;
 
-static kmutex_t                arc_user_evicts_lock;
-static kcondvar_t      arc_user_evicts_cv;
-static boolean_t       arc_user_evicts_thread_exit;
-
-/* number of objects to prune from caches when arc_meta_limit is reached */
-int zfs_arc_meta_prune = 10000;
-
-/* The preferred strategy to employ when arc_meta_limit is reached */
-int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
-
-typedef enum arc_reclaim_strategy {
-       ARC_RECLAIM_AGGR,               /* Aggressive reclaim strategy */
-       ARC_RECLAIM_CONS                /* Conservative reclaim strategy */
-} arc_reclaim_strategy_t;
-
 /*
  * The number of headers to evict in arc_evict_state_impl() before
  * dropping the sublist lock and evicting from another sublist. A lower
@@ -184,48 +324,40 @@ typedef enum arc_reclaim_strategy {
  */
 int zfs_arc_evict_batch_limit = 10;
 
-/*
- * The number of sublists used for each of the arc state lists. If this
- * is not set to a suitable value by the user, it will be configured to
- * the number of CPUs on the system in arc_init().
- */
-int zfs_arc_num_sublists_per_state = 0;
-
 /* number of seconds before growing cache again */
-int zfs_arc_grow_retry = 5;
+static int             arc_grow_retry = 5;
 
-/* shift of arc_c for calculating overflow limit in arc_get_data_buf */
-int zfs_arc_overflow_shift = 8;
-
-/* disable anon data aggressively growing arc_p */
-int zfs_arc_p_aggressive_disable = 1;
+/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
+int            zfs_arc_overflow_shift = 8;
 
-/* disable arc_p adapt dampener in arc_adapt */
-int zfs_arc_p_dampener_disable = 1;
+/* shift of arc_c for calculating both min and max arc_p */
+static int             arc_p_min_shift = 4;
 
 /* log2(fraction of arc to reclaim) */
-int zfs_arc_shrink_shift = 5;
+static int             arc_shrink_shift = 7;
+
+/* percent of pagecache to reclaim arc to */
+#ifdef _KERNEL
+static uint_t          zfs_arc_pc_percent = 0;
+#endif
 
 /*
- * minimum lifespan of a prefetch block in clock ticks
- * (initialized in arc_init())
+ * log2(fraction of ARC which must be free to allow growing).
+ * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
+ * when reading a new block into the ARC, we will evict an equal-sized block
+ * from the ARC.
+ *
+ * This must be less than arc_shrink_shift, so that when we shrink the ARC,
+ * we will still not allow it to grow.
  */
-int zfs_arc_min_prefetch_lifespan = HZ;
-
-/* disable arc proactive arc throttle due to low memory */
-int zfs_arc_memory_throttle_disable = 1;
+int                    arc_no_grow_shift = 5;
 
-/* disable duplicate buffer eviction */
-int zfs_disable_dup_eviction = 0;
-
-/* average block used to size buf_hash_table */
-int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
 
 /*
  * minimum lifespan of a prefetch block in clock ticks
  * (initialized in arc_init())
  */
-static int arc_min_prefetch_lifespan;
+static int             arc_min_prefetch_lifespan;
 
 /*
  * If this percent of memory is free, don't throttle.
@@ -234,14 +366,16 @@ int arc_lotsfree_percent = 10;
 
 static int arc_dead;
 
-/* expiration time for arc_no_grow */
-static clock_t arc_grow_time = 0;
-
 /*
  * The arc has filled available memory and has now warmed up.
  */
 static boolean_t arc_warm;
 
+/*
+ * log2 fraction of the zio arena to keep free.
+ */
+int arc_zio_arena_free_shift = 2;
+
 /*
  * These tunables are for performance analysis.
  */
@@ -249,11 +383,37 @@ unsigned long zfs_arc_max = 0;
 unsigned long zfs_arc_min = 0;
 unsigned long zfs_arc_meta_limit = 0;
 unsigned long zfs_arc_meta_min = 0;
+unsigned long zfs_arc_dnode_limit = 0;
+unsigned long zfs_arc_dnode_reduce_percent = 10;
+int zfs_arc_grow_retry = 0;
+int zfs_arc_shrink_shift = 0;
+int zfs_arc_p_min_shift = 0;
+int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
+
+int zfs_compressed_arc_enabled = B_TRUE;
+
+/*
+ * ARC will evict meta buffers that exceed arc_meta_limit. This
+ * tunable makes arc_meta_limit adjustable for different workloads.
+ */
+unsigned long zfs_arc_meta_limit_percent = 75;
+
+/*
+ * Percentage that can be consumed by dnodes of ARC meta buffers.
+ */
+unsigned long zfs_arc_dnode_limit_percent = 10;
 
 /*
- * Limit the number of restarts in arc_adjust_meta()
+ * These tunables are Linux specific
  */
-unsigned long zfs_arc_meta_adjust_restarts = 4096;
+unsigned long zfs_arc_sys_free = 0;
+int zfs_arc_min_prefetch_lifespan = 0;
+int zfs_arc_p_aggressive_disable = 1;
+int zfs_arc_p_dampener_disable = 1;
+int zfs_arc_meta_prune = 10000;
+int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
+int zfs_arc_meta_adjust_restarts = 4096;
+int zfs_arc_lotsfree_percent = 10;
 
 /* The 6 states: */
 static arc_state_t ARC_anon;
@@ -311,25 +471,161 @@ typedef struct arc_stats {
        kstat_named_t arcstat_c_min;
        kstat_named_t arcstat_c_max;
        kstat_named_t arcstat_size;
+       /*
+        * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
+        * Note that the compressed bytes may match the uncompressed bytes
+        * if the block is either not compressed or compressed arc is disabled.
+        */
+       kstat_named_t arcstat_compressed_size;
+       /*
+        * Uncompressed size of the data stored in b_pabd. If compressed
+        * arc is disabled then this value will be identical to the stat
+        * above.
+        */
+       kstat_named_t arcstat_uncompressed_size;
+       /*
+        * Number of bytes stored in all the arc_buf_t's. This is classified
+        * as "overhead" since this data is typically short-lived and will
+        * be evicted from the arc when it becomes unreferenced unless the
+        * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
+        * values have been set (see comment in dbuf.c for more information).
+        */
+       kstat_named_t arcstat_overhead_size;
+       /*
+        * Number of bytes consumed by internal ARC structures necessary
+        * for tracking purposes; these structures are not actually
+        * backed by ARC buffers. This includes arc_buf_hdr_t structures
+        * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
+        * caches), and arc_buf_t structures (allocated via arc_buf_t
+        * cache).
+        */
        kstat_named_t arcstat_hdr_size;
+       /*
+        * Number of bytes consumed by ARC buffers of type equal to
+        * ARC_BUFC_DATA. This is generally consumed by buffers backing
+        * on disk user data (e.g. plain file contents).
+        */
        kstat_named_t arcstat_data_size;
-       kstat_named_t arcstat_meta_size;
-       kstat_named_t arcstat_other_size;
+       /*
+        * Number of bytes consumed by ARC buffers of type equal to
+        * ARC_BUFC_METADATA. This is generally consumed by buffers
+        * backing on disk data that is used for internal ZFS
+        * structures (e.g. ZAP, dnode, indirect blocks, etc).
+        */
+       kstat_named_t arcstat_metadata_size;
+       /*
+        * Number of bytes consumed by dmu_buf_impl_t objects.
+        */
+       kstat_named_t arcstat_dbuf_size;
+       /*
+        * Number of bytes consumed by dnode_t objects.
+        */
+       kstat_named_t arcstat_dnode_size;
+       /*
+        * Number of bytes consumed by bonus buffers.
+        */
+       kstat_named_t arcstat_bonus_size;
+       /*
+        * Total number of bytes consumed by ARC buffers residing in the
+        * arc_anon state. This includes *all* buffers in the arc_anon
+        * state; e.g. data, metadata, evictable, and unevictable buffers
+        * are all included in this value.
+        */
        kstat_named_t arcstat_anon_size;
-       kstat_named_t arcstat_anon_evict_data;
-       kstat_named_t arcstat_anon_evict_metadata;
+       /*
+        * Number of bytes consumed by ARC buffers that meet the
+        * following criteria: backing buffers of type ARC_BUFC_DATA,
+        * residing in the arc_anon state, and are eligible for eviction
+        * (e.g. have no outstanding holds on the buffer).
+        */
+       kstat_named_t arcstat_anon_evictable_data;
+       /*
+        * Number of bytes consumed by ARC buffers that meet the
+        * following criteria: backing buffers of type ARC_BUFC_METADATA,
+        * residing in the arc_anon state, and are eligible for eviction
+        * (e.g. have no outstanding holds on the buffer).
+        */
+       kstat_named_t arcstat_anon_evictable_metadata;
+       /*
+        * Total number of bytes consumed by ARC buffers residing in the
+        * arc_mru state. This includes *all* buffers in the arc_mru
+        * state; e.g. data, metadata, evictable, and unevictable buffers
+        * are all included in this value.
+        */
        kstat_named_t arcstat_mru_size;
-       kstat_named_t arcstat_mru_evict_data;
-       kstat_named_t arcstat_mru_evict_metadata;
+       /*
+        * Number of bytes consumed by ARC buffers that meet the
+        * following criteria: backing buffers of type ARC_BUFC_DATA,
+        * residing in the arc_mru state, and are eligible for eviction
+        * (e.g. have no outstanding holds on the buffer).
+        */
+       kstat_named_t arcstat_mru_evictable_data;
+       /*
+        * Number of bytes consumed by ARC buffers that meet the
+        * following criteria: backing buffers of type ARC_BUFC_METADATA,
+        * residing in the arc_mru state, and are eligible for eviction
+        * (e.g. have no outstanding holds on the buffer).
+        */
+       kstat_named_t arcstat_mru_evictable_metadata;
+       /*
+        * Total number of bytes that *would have been* consumed by ARC
+        * buffers in the arc_mru_ghost state. The key thing to note
+        * here, is the fact that this size doesn't actually indicate
+        * RAM consumption. The ghost lists only consist of headers and
+        * don't actually have ARC buffers linked off of these headers.
+        * Thus, *if* the headers had associated ARC buffers, these
+        * buffers *would have* consumed this number of bytes.
+        */
        kstat_named_t arcstat_mru_ghost_size;
-       kstat_named_t arcstat_mru_ghost_evict_data;
-       kstat_named_t arcstat_mru_ghost_evict_metadata;
+       /*
+        * Number of bytes that *would have been* consumed by ARC
+        * buffers that are eligible for eviction, of type
+        * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
+        */
+       kstat_named_t arcstat_mru_ghost_evictable_data;
+       /*
+        * Number of bytes that *would have been* consumed by ARC
+        * buffers that are eligible for eviction, of type
+        * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
+        */
+       kstat_named_t arcstat_mru_ghost_evictable_metadata;
+       /*
+        * Total number of bytes consumed by ARC buffers residing in the
+        * arc_mfu state. This includes *all* buffers in the arc_mfu
+        * state; e.g. data, metadata, evictable, and unevictable buffers
+        * are all included in this value.
+        */
        kstat_named_t arcstat_mfu_size;
-       kstat_named_t arcstat_mfu_evict_data;
-       kstat_named_t arcstat_mfu_evict_metadata;
+       /*
+        * Number of bytes consumed by ARC buffers that are eligible for
+        * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
+        * state.
+        */
+       kstat_named_t arcstat_mfu_evictable_data;
+       /*
+        * Number of bytes consumed by ARC buffers that are eligible for
+        * eviction, of type ARC_BUFC_METADATA, and reside in the
+        * arc_mfu state.
+        */
+       kstat_named_t arcstat_mfu_evictable_metadata;
+       /*
+        * Total number of bytes that *would have been* consumed by ARC
+        * buffers in the arc_mfu_ghost state. See the comment above
+        * arcstat_mru_ghost_size for more details.
+        */
        kstat_named_t arcstat_mfu_ghost_size;
-       kstat_named_t arcstat_mfu_ghost_evict_data;
-       kstat_named_t arcstat_mfu_ghost_evict_metadata;
+       /*
+        * Number of bytes that *would have been* consumed by ARC
+        * buffers that are eligible for eviction, of type
+        * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
+        */
+       kstat_named_t arcstat_mfu_ghost_evictable_data;
+       /*
+        * Number of bytes that *would have been* consumed by ARC
+        * buffers that are eligible for eviction, of type
+        * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
+        */
+       kstat_named_t arcstat_mfu_ghost_evictable_metadata;
        kstat_named_t arcstat_l2_hits;
        kstat_named_t arcstat_l2_misses;
        kstat_named_t arcstat_l2_feeds;
@@ -344,30 +640,32 @@ typedef struct arc_stats {
        kstat_named_t arcstat_l2_evict_reading;
        kstat_named_t arcstat_l2_evict_l1cached;
        kstat_named_t arcstat_l2_free_on_write;
-       kstat_named_t arcstat_l2_cdata_free_on_write;
        kstat_named_t arcstat_l2_abort_lowmem;
        kstat_named_t arcstat_l2_cksum_bad;
        kstat_named_t arcstat_l2_io_error;
-       kstat_named_t arcstat_l2_size;
-       kstat_named_t arcstat_l2_asize;
+       kstat_named_t arcstat_l2_lsize;
+       kstat_named_t arcstat_l2_psize;
        kstat_named_t arcstat_l2_hdr_size;
-       kstat_named_t arcstat_l2_compress_successes;
-       kstat_named_t arcstat_l2_compress_zeros;
-       kstat_named_t arcstat_l2_compress_failures;
        kstat_named_t arcstat_memory_throttle_count;
-       kstat_named_t arcstat_duplicate_buffers;
-       kstat_named_t arcstat_duplicate_buffers_size;
-       kstat_named_t arcstat_duplicate_reads;
        kstat_named_t arcstat_memory_direct_count;
        kstat_named_t arcstat_memory_indirect_count;
+       kstat_named_t arcstat_memory_all_bytes;
+       kstat_named_t arcstat_memory_free_bytes;
+       kstat_named_t arcstat_memory_available_bytes;
        kstat_named_t arcstat_no_grow;
        kstat_named_t arcstat_tempreserve;
        kstat_named_t arcstat_loaned_bytes;
        kstat_named_t arcstat_prune;
        kstat_named_t arcstat_meta_used;
        kstat_named_t arcstat_meta_limit;
+       kstat_named_t arcstat_dnode_limit;
        kstat_named_t arcstat_meta_max;
        kstat_named_t arcstat_meta_min;
+       kstat_named_t arcstat_sync_wait_for_async;
+       kstat_named_t arcstat_demand_hit_predictive_prefetch;
+       kstat_named_t arcstat_need_free;
+       kstat_named_t arcstat_sys_free;
+       kstat_named_t arcstat_raw_size;
 } arc_stats_t;
 
 static arc_stats_t arc_stats = {
@@ -403,25 +701,30 @@ static arc_stats_t arc_stats = {
        { "c_min",                      KSTAT_DATA_UINT64 },
        { "c_max",                      KSTAT_DATA_UINT64 },
        { "size",                       KSTAT_DATA_UINT64 },
+       { "compressed_size",            KSTAT_DATA_UINT64 },
+       { "uncompressed_size",          KSTAT_DATA_UINT64 },
+       { "overhead_size",              KSTAT_DATA_UINT64 },
        { "hdr_size",                   KSTAT_DATA_UINT64 },
        { "data_size",                  KSTAT_DATA_UINT64 },
-       { "meta_size",                  KSTAT_DATA_UINT64 },
-       { "other_size",                 KSTAT_DATA_UINT64 },
+       { "metadata_size",              KSTAT_DATA_UINT64 },
+       { "dbuf_size",                  KSTAT_DATA_UINT64 },
+       { "dnode_size",                 KSTAT_DATA_UINT64 },
+       { "bonus_size",                 KSTAT_DATA_UINT64 },
        { "anon_size",                  KSTAT_DATA_UINT64 },
-       { "anon_evict_data",            KSTAT_DATA_UINT64 },
-       { "anon_evict_metadata",        KSTAT_DATA_UINT64 },
+       { "anon_evictable_data",        KSTAT_DATA_UINT64 },
+       { "anon_evictable_metadata",    KSTAT_DATA_UINT64 },
        { "mru_size",                   KSTAT_DATA_UINT64 },
-       { "mru_evict_data",             KSTAT_DATA_UINT64 },
-       { "mru_evict_metadata",         KSTAT_DATA_UINT64 },
+       { "mru_evictable_data",         KSTAT_DATA_UINT64 },
+       { "mru_evictable_metadata",     KSTAT_DATA_UINT64 },
        { "mru_ghost_size",             KSTAT_DATA_UINT64 },
-       { "mru_ghost_evict_data",       KSTAT_DATA_UINT64 },
-       { "mru_ghost_evict_metadata",   KSTAT_DATA_UINT64 },
+       { "mru_ghost_evictable_data",   KSTAT_DATA_UINT64 },
+       { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
        { "mfu_size",                   KSTAT_DATA_UINT64 },
-       { "mfu_evict_data",             KSTAT_DATA_UINT64 },
-       { "mfu_evict_metadata",         KSTAT_DATA_UINT64 },
+       { "mfu_evictable_data",         KSTAT_DATA_UINT64 },
+       { "mfu_evictable_metadata",     KSTAT_DATA_UINT64 },
        { "mfu_ghost_size",             KSTAT_DATA_UINT64 },
-       { "mfu_ghost_evict_data",       KSTAT_DATA_UINT64 },
-       { "mfu_ghost_evict_metadata",   KSTAT_DATA_UINT64 },
+       { "mfu_ghost_evictable_data",   KSTAT_DATA_UINT64 },
+       { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
        { "l2_hits",                    KSTAT_DATA_UINT64 },
        { "l2_misses",                  KSTAT_DATA_UINT64 },
        { "l2_feeds",                   KSTAT_DATA_UINT64 },
@@ -436,30 +739,32 @@ static arc_stats_t arc_stats = {
        { "l2_evict_reading",           KSTAT_DATA_UINT64 },
        { "l2_evict_l1cached",          KSTAT_DATA_UINT64 },
        { "l2_free_on_write",           KSTAT_DATA_UINT64 },
-       { "l2_cdata_free_on_write",     KSTAT_DATA_UINT64 },
        { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
        { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
        { "l2_io_error",                KSTAT_DATA_UINT64 },
        { "l2_size",                    KSTAT_DATA_UINT64 },
        { "l2_asize",                   KSTAT_DATA_UINT64 },
        { "l2_hdr_size",                KSTAT_DATA_UINT64 },
-       { "l2_compress_successes",      KSTAT_DATA_UINT64 },
-       { "l2_compress_zeros",          KSTAT_DATA_UINT64 },
-       { "l2_compress_failures",       KSTAT_DATA_UINT64 },
        { "memory_throttle_count",      KSTAT_DATA_UINT64 },
-       { "duplicate_buffers",          KSTAT_DATA_UINT64 },
-       { "duplicate_buffers_size",     KSTAT_DATA_UINT64 },
-       { "duplicate_reads",            KSTAT_DATA_UINT64 },
        { "memory_direct_count",        KSTAT_DATA_UINT64 },
        { "memory_indirect_count",      KSTAT_DATA_UINT64 },
+       { "memory_all_bytes",           KSTAT_DATA_UINT64 },
+       { "memory_free_bytes",          KSTAT_DATA_UINT64 },
+       { "memory_available_bytes",     KSTAT_DATA_INT64 },
        { "arc_no_grow",                KSTAT_DATA_UINT64 },
        { "arc_tempreserve",            KSTAT_DATA_UINT64 },
        { "arc_loaned_bytes",           KSTAT_DATA_UINT64 },
        { "arc_prune",                  KSTAT_DATA_UINT64 },
        { "arc_meta_used",              KSTAT_DATA_UINT64 },
        { "arc_meta_limit",             KSTAT_DATA_UINT64 },
+       { "arc_dnode_limit",            KSTAT_DATA_UINT64 },
        { "arc_meta_max",               KSTAT_DATA_UINT64 },
        { "arc_meta_min",               KSTAT_DATA_UINT64 },
+       { "sync_wait_for_async",        KSTAT_DATA_UINT64 },
+       { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
+       { "arc_need_free",              KSTAT_DATA_UINT64 },
+       { "arc_sys_free",               KSTAT_DATA_UINT64 },
+       { "arc_raw_size",               KSTAT_DATA_UINT64 }
 };
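
As a quick way to observe the compressed_size, uncompressed_size, and overhead_size statistics described above, the following minimal userland sketch reads them from the Linux kstat interface. It assumes the ZFS-on-Linux /proc/spl/kstat/zfs/arcstats file is present, and the parsing is deliberately naive:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	FILE *fp = fopen("/proc/spl/kstat/zfs/arcstats", "r");
	char line[256];

	if (fp == NULL) {
		perror("arcstats");
		return (1);
	}
	while (fgets(line, sizeof (line), fp) != NULL) {
		/* Print only the stats discussed in the comments above. */
		if (strncmp(line, "compressed_size", 15) == 0 ||
		    strncmp(line, "uncompressed_size", 17) == 0 ||
		    strncmp(line, "overhead_size", 13) == 0)
			fputs(line, stdout);
	}
	(void) fclose(fp);
	return (0);
}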
 
 #define        ARCSTAT(stat)   (arc_stats.stat.value.ui64)
@@ -521,22 +826,32 @@ static arc_state_t        *arc_l2c_only;
 #define        arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 #define        arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 #define        arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
-#define        arc_no_grow     ARCSTAT(arcstat_no_grow)
+#define        arc_no_grow     ARCSTAT(arcstat_no_grow) /* do not grow cache size */
 #define        arc_tempreserve ARCSTAT(arcstat_tempreserve)
 #define        arc_loaned_bytes        ARCSTAT(arcstat_loaned_bytes)
 #define        arc_meta_limit  ARCSTAT(arcstat_meta_limit) /* max size for metadata */
+#define        arc_dnode_limit ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
 #define        arc_meta_min    ARCSTAT(arcstat_meta_min) /* min size for metadata */
 #define        arc_meta_used   ARCSTAT(arcstat_meta_used) /* size of metadata */
 #define        arc_meta_max    ARCSTAT(arcstat_meta_max) /* max size of metadata */
-
-#define        L2ARC_IS_VALID_COMPRESS(_c_) \
-       ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
+#define        arc_dbuf_size   ARCSTAT(arcstat_dbuf_size) /* dbuf metadata */
+#define        arc_dnode_size  ARCSTAT(arcstat_dnode_size) /* dnode metadata */
+#define        arc_bonus_size  ARCSTAT(arcstat_bonus_size) /* bonus buffer metadata */
+#define        arc_need_free   ARCSTAT(arcstat_need_free) /* bytes to be freed */
+#define        arc_sys_free    ARCSTAT(arcstat_sys_free) /* target system free bytes */
+
+/* size of all b_rabd's in entire arc */
+#define        arc_raw_size    ARCSTAT(arcstat_raw_size)
+/* compressed size of entire arc */
+#define        arc_compressed_size     ARCSTAT(arcstat_compressed_size)
+/* uncompressed size of entire arc */
+#define        arc_uncompressed_size   ARCSTAT(arcstat_uncompressed_size)
+/* number of bytes in the arc from arc_buf_t's */
+#define        arc_overhead_size       ARCSTAT(arcstat_overhead_size)
 
 static list_t arc_prune_list;
 static kmutex_t arc_prune_mtx;
 static taskq_t *arc_prune_taskq;
-static arc_buf_t *arc_eviction_list;
-static arc_buf_hdr_t arc_eviction_hdr;
 
 #define        GHOST_STATE(state)      \
        ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
@@ -546,39 +861,53 @@ static arc_buf_hdr_t arc_eviction_hdr;
 #define        HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
 #define        HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
 #define        HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_FLAG_PREFETCH)
-#define        HDR_FREED_IN_READ(hdr)  ((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
-#define        HDR_BUF_AVAILABLE(hdr)  ((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)
+#define        HDR_COMPRESSION_ENABLED(hdr)    \
+       ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
 
 #define        HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_FLAG_L2CACHE)
-#define        HDR_L2COMPRESS(hdr)     ((hdr)->b_flags & ARC_FLAG_L2COMPRESS)
 #define        HDR_L2_READING(hdr)     \
-           (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&      \
-           ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
+       (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&  \
+       ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
 #define        HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
 #define        HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
 #define        HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
+#define        HDR_PROTECTED(hdr)      ((hdr)->b_flags & ARC_FLAG_PROTECTED)
+#define        HDR_NOAUTH(hdr)         ((hdr)->b_flags & ARC_FLAG_NOAUTH)
+#define        HDR_SHARED_DATA(hdr)    ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
 
 #define        HDR_ISTYPE_METADATA(hdr)        \
-           ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
+       ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
 #define        HDR_ISTYPE_DATA(hdr)    (!HDR_ISTYPE_METADATA(hdr))
 
 #define        HDR_HAS_L1HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
 #define        HDR_HAS_L2HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
+#define        HDR_HAS_RABD(hdr)       \
+       (HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) &&    \
+       (hdr)->b_crypt_hdr.b_rabd != NULL)
+#define        HDR_ENCRYPTED(hdr)      \
+       (HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
+#define        HDR_AUTHENTICATED(hdr)  \
+       (HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
 
 /* For storing compression mode in b_flags */
-#define        HDR_COMPRESS_OFFSET     24
-#define        HDR_COMPRESS_NBITS      7
+#define        HDR_COMPRESS_OFFSET     (highbit64(ARC_FLAG_COMPRESS_0) - 1)
+
+#define        HDR_GET_COMPRESS(hdr)   ((enum zio_compress)BF32_GET((hdr)->b_flags, \
+       HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
+#define        HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
+       HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
 
-#define        HDR_GET_COMPRESS(hdr)   ((enum zio_compress)BF32_GET(hdr->b_flags, \
-           HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS))
-#define        HDR_SET_COMPRESS(hdr, cmp) BF32_SET(hdr->b_flags, \
-           HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS, (cmp))
+#define        ARC_BUF_LAST(buf)       ((buf)->b_next == NULL)
+#define        ARC_BUF_SHARED(buf)     ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
+#define        ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
+#define        ARC_BUF_ENCRYPTED(buf)  ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
 
 /*
  * Other sizes
  */
 
-#define        HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
+#define        HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
+#define        HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
 #define        HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
 
 /*
@@ -619,6 +948,7 @@ uint64_t zfs_crc64_table[256];
 
 #define        L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 #define        L2ARC_HEADROOM          2                       /* num of writes */
+
 /*
  * If we discover during ARC scan any buffers to be compressed, we boost
  * our headroom for the next scanning cycle by this percentage multiple.
@@ -627,6 +957,12 @@ uint64_t zfs_crc64_table[256];
 #define        L2ARC_FEED_SECS         1               /* caching interval secs */
 #define        L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 
+/*
+ * We can feed L2ARC from two states of ARC buffers, mru and mfu,
+ * and each of these states has two types: data and metadata.
+ */
+#define        L2ARC_FEED_TYPES        4
+
 #define        l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 #define        l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 
@@ -638,7 +974,6 @@ unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
 unsigned long l2arc_feed_secs = L2ARC_FEED_SECS;       /* interval seconds */
 unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;   /* min interval msecs */
 int l2arc_noprefetch = B_TRUE;                 /* don't cache prefetch bufs */
-int l2arc_nocompress = B_FALSE;                        /* don't compress bufs */
 int l2arc_feed_again = B_TRUE;                 /* turbo warmup */
 int l2arc_norw = B_FALSE;                      /* no reads during writes */
 
@@ -655,41 +990,56 @@ static kmutex_t l2arc_free_on_write_mtx;  /* mutex for list */
 static uint64_t l2arc_ndev;                    /* number of devices */
 
 typedef struct l2arc_read_callback {
-       arc_buf_t               *l2rcb_buf;             /* read buffer */
-       spa_t                   *l2rcb_spa;             /* spa */
+       arc_buf_hdr_t           *l2rcb_hdr;             /* read header */
        blkptr_t                l2rcb_bp;               /* original blkptr */
        zbookmark_phys_t        l2rcb_zb;               /* original bookmark */
        int                     l2rcb_flags;            /* original flags */
-       enum zio_compress       l2rcb_compress;         /* applied compress */
+       abd_t                   *l2rcb_abd;             /* temporary buffer */
 } l2arc_read_callback_t;
 
 typedef struct l2arc_data_free {
        /* protected by l2arc_free_on_write_mtx */
-       void            *l2df_data;
+       abd_t           *l2df_abd;
        size_t          l2df_size;
-       void            (*l2df_func)(void *, size_t);
+       arc_buf_contents_t l2df_type;
        list_node_t     l2df_list_node;
 } l2arc_data_free_t;
 
+typedef enum arc_fill_flags {
+       ARC_FILL_LOCKED         = 1 << 0, /* hdr lock is held */
+       ARC_FILL_COMPRESSED     = 1 << 1, /* fill with compressed data */
+       ARC_FILL_ENCRYPTED      = 1 << 2, /* fill with encrypted data */
+       ARC_FILL_NOAUTH         = 1 << 3, /* don't attempt to authenticate */
+       ARC_FILL_IN_PLACE       = 1 << 4  /* fill in place (special case) */
+} arc_fill_flags_t;
+
 static kmutex_t l2arc_feed_thr_lock;
 static kcondvar_t l2arc_feed_thr_cv;
 static uint8_t l2arc_thread_exit;
 
-static void arc_get_data_buf(arc_buf_t *);
+static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
+static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
+static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
+static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
+static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
+static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
+static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
+static void arc_hdr_alloc_abd(arc_buf_hdr_t *, boolean_t);
 static void arc_access(arc_buf_hdr_t *, kmutex_t *);
 static boolean_t arc_is_overflowing(void);
 static void arc_buf_watch(arc_buf_t *);
+static void arc_tuning_update(void);
+static void arc_prune_async(int64_t);
+static uint64_t arc_all_memory(void);
 
 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
 static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
+static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
+static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
 
 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
 static void l2arc_read_done(zio_t *);
 
-static boolean_t l2arc_compress_buf(arc_buf_hdr_t *);
-static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
-static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
-
 static uint64_t
 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
 {
@@ -707,14 +1057,14 @@ buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
        return (crc);
 }
 
-#define        BUF_EMPTY(buf)                                          \
-       ((buf)->b_dva.dva_word[0] == 0 &&                       \
-       (buf)->b_dva.dva_word[1] == 0)
+#define        HDR_EMPTY(hdr)                                          \
+       ((hdr)->b_dva.dva_word[0] == 0 &&                       \
+       (hdr)->b_dva.dva_word[1] == 0)
 
-#define        BUF_EQUAL(spa, dva, birth, buf)                         \
-       ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&     \
-       ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&     \
-       ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
+#define        HDR_EQUAL(spa, dva, birth, hdr)                         \
+       ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&     \
+       ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&     \
+       ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
 
 static void
 buf_discard_identity(arc_buf_hdr_t *hdr)
@@ -736,7 +1086,7 @@ buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
        mutex_enter(hash_lock);
        for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
            hdr = hdr->b_hash_next) {
-               if (BUF_EQUAL(spa, dva, birth, hdr)) {
+               if (HDR_EQUAL(spa, dva, birth, hdr)) {
                        *lockp = hash_lock;
                        return (hdr);
                }
@@ -774,13 +1124,13 @@ buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
 
        for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
            fhdr = fhdr->b_hash_next, i++) {
-               if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
+               if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
                        return (fhdr);
        }
 
        hdr->b_hash_next = buf_hash_table.ht_table[idx];
        buf_hash_table.ht_table[idx] = hdr;
-       hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
+       arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
 
        /* collect some hash table performance data */
        if (i > 0) {
@@ -808,12 +1158,12 @@ buf_hash_remove(arc_buf_hdr_t *hdr)
 
        hdrp = &buf_hash_table.ht_table[idx];
        while ((fhdr = *hdrp) != hdr) {
-               ASSERT(fhdr != NULL);
+               ASSERT3P(fhdr, !=, NULL);
                hdrp = &fhdr->b_hash_next;
        }
        *hdrp = hdr->b_hash_next;
        hdr->b_hash_next = NULL;
-       hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE;
+       arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
 
        /* collect some hash table performance data */
        ARCSTAT_BUMPDOWN(arcstat_hash_elements);
@@ -826,7 +1176,9 @@ buf_hash_remove(arc_buf_hdr_t *hdr)
 /*
  * Global data structures and functions for the buf kmem cache.
  */
+
 static kmem_cache_t *hdr_full_cache;
+static kmem_cache_t *hdr_full_crypt_cache;
 static kmem_cache_t *hdr_l2only_cache;
 static kmem_cache_t *buf_cache;
 
@@ -849,6 +1201,7 @@ buf_fini(void)
        for (i = 0; i < BUF_LOCKS; i++)
                mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
        kmem_cache_destroy(hdr_full_cache);
+       kmem_cache_destroy(hdr_full_crypt_cache);
        kmem_cache_destroy(hdr_l2only_cache);
        kmem_cache_destroy(buf_cache);
 }
@@ -875,6 +1228,19 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
        return (0);
 }
 
+/* ARGSUSED */
+static int
+hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
+{
+       arc_buf_hdr_t *hdr = vbuf;
+
+       hdr_full_cons(vbuf, unused, kmflag);
+       bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
+       arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
+
+       return (0);
+}
+
 /* ARGSUSED */
 static int
 hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
@@ -910,7 +1276,7 @@ hdr_full_dest(void *vbuf, void *unused)
 {
        arc_buf_hdr_t *hdr = vbuf;
 
-       ASSERT(BUF_EMPTY(hdr));
+       ASSERT(HDR_EMPTY(hdr));
        cv_destroy(&hdr->b_l1hdr.b_cv);
        refcount_destroy(&hdr->b_l1hdr.b_refcnt);
        mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
@@ -918,13 +1284,23 @@ hdr_full_dest(void *vbuf, void *unused)
        arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
 }
 
+/* ARGSUSED */
+static void
+hdr_full_crypt_dest(void *vbuf, void *unused)
+{
+       arc_buf_hdr_t *hdr = vbuf;
+
+       hdr_full_dest(vbuf, unused);
+       arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
+}
+
 /* ARGSUSED */
 static void
 hdr_l2only_dest(void *vbuf, void *unused)
 {
        ASSERTV(arc_buf_hdr_t *hdr = vbuf);
 
-       ASSERT(BUF_EMPTY(hdr));
+       ASSERT(HDR_EMPTY(hdr));
        arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
 }
 
@@ -938,10 +1314,26 @@ buf_dest(void *vbuf, void *unused)
        arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 }
 
+/*
+ * Reclaim callback -- invoked when memory is low.
+ */
+/* ARGSUSED */
+static void
+hdr_recl(void *unused)
+{
+       dprintf("hdr_recl called\n");
+       /*
+        * umem calls the reclaim func when we destroy the buf cache,
+        * which is after we do arc_fini().
+        */
+       if (!arc_dead)
+               cv_signal(&arc_reclaim_thread_cv);
+}
+
 static void
 buf_init(void)
 {
-       uint64_t *ct;
+       uint64_t *ct = NULL;
        uint64_t hsize = 1ULL << 12;
        int i, j;
 
@@ -951,7 +1343,7 @@ buf_init(void)
         * By default, the table will take up
         * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
         */
-       while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
+       while (hsize * zfs_arc_average_blocksize < arc_all_memory())
                hsize <<= 1;
 retry:
        buf_hash_table.ht_mask = hsize - 1;
@@ -973,9 +1365,12 @@ retry:
        }
 
        hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
-           0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
+           0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
+       hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
+           HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
+           hdr_recl, NULL, NULL, 0);
        hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
-           HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
+           HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
            NULL, NULL, 0);
        buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
            0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
@@ -990,145 +1385,233 @@ retry:
        }
 }
 
+#define        ARC_MINTIME     (hz>>4) /* 62 ms */
+
 /*
- * Transition between the two allocation states for the arc_buf_hdr struct.
- * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
- * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
- * version is used when a cache buffer is only in the L2ARC in order to reduce
- * memory usage.
+ * This is the size that the buf occupies in memory. If the buf is compressed,
+ * it will correspond to the compressed size. You should use this method of
+ * getting the buf size unless you explicitly need the logical size.
  */
-static arc_buf_hdr_t *
-arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
+uint64_t
+arc_buf_size(arc_buf_t *buf)
 {
-       arc_buf_hdr_t *nhdr;
-       l2arc_dev_t *dev;
-
-       ASSERT(HDR_HAS_L2HDR(hdr));
-       ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
-           (old == hdr_l2only_cache && new == hdr_full_cache));
+       return (ARC_BUF_COMPRESSED(buf) ?
+           HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
+}
 
-       dev = hdr->b_l2hdr.b_dev;
-       nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
+uint64_t
+arc_buf_lsize(arc_buf_t *buf)
+{
+       return (HDR_GET_LSIZE(buf->b_hdr));
+}
 
-       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
-       buf_hash_remove(hdr);
+/*
+ * This function will return B_TRUE if the buffer is encrypted in memory.
+ * This buffer can be decrypted by calling arc_untransform().
+ */
+boolean_t
+arc_is_encrypted(arc_buf_t *buf)
+{
+       return (ARC_BUF_ENCRYPTED(buf) != 0);
+}
 
-       bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
-       if (new == hdr_full_cache) {
-               nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
-               /*
-                * arc_access and arc_change_state need to be aware that a
-                * header has just come out of L2ARC, so we set its state to
-                * l2c_only even though it's about to change.
-                */
-               nhdr->b_l1hdr.b_state = arc_l2c_only;
+/*
+ * Returns B_TRUE if the buffer represents data that has not had its MAC
+ * verified yet.
+ */
+boolean_t
+arc_is_unauthenticated(arc_buf_t *buf)
+{
+       return (HDR_NOAUTH(buf->b_hdr) != 0);
+}
 
-               /* Verify previous threads set to NULL before freeing */
-               ASSERT3P(nhdr->b_l1hdr.b_tmp_cdata, ==, NULL);
-       } else {
-               ASSERT(hdr->b_l1hdr.b_buf == NULL);
-               ASSERT0(hdr->b_l1hdr.b_datacnt);
+void
+arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
+    uint8_t *iv, uint8_t *mac)
+{
+       arc_buf_hdr_t *hdr = buf->b_hdr;
 
-               /*
-                * If we've reached here, We must have been called from
-                * arc_evict_hdr(), as such we should have already been
-                * removed from any ghost list we were previously on
-                * (which protects us from racing with arc_evict_state),
-                * thus no locking is needed during this check.
-                */
-               ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
+       ASSERT(HDR_PROTECTED(hdr));
 
-               /*
-                * A buffer must not be moved into the arc_l2c_only
-                * state if it's not finished being written out to the
-                * l2arc device. Otherwise, the b_l1hdr.b_tmp_cdata field
-                * might try to be accessed, even though it was removed.
-                */
-               VERIFY(!HDR_L2_WRITING(hdr));
-               VERIFY3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
+       bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
+       bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
+       bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
+       *byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
+           ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
+}
 
-               nhdr->b_flags &= ~ARC_FLAG_HAS_L1HDR;
-       }
-       /*
-        * The header has been reallocated so we need to re-insert it into any
-        * lists it was on.
-        */
-       (void) buf_hash_insert(nhdr, NULL);
+/*
+ * Indicates how this buffer is compressed in memory. If it is not compressed
+ * the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
+ * arc_untransform() as long as it is also unencrypted.
+ */
+enum zio_compress
+arc_get_compression(arc_buf_t *buf)
+{
+       return (ARC_BUF_COMPRESSED(buf) ?
+           HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
+}
 
-       ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
+/*
+ * Return the compression algorithm used to store this data in the ARC. If ARC
+ * compression is enabled or this is an encrypted block, this will be the same
+ * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
+ */
+static inline enum zio_compress
+arc_hdr_get_compress(arc_buf_hdr_t *hdr)
+{
+       return (HDR_COMPRESSION_ENABLED(hdr) ?
+           HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
+}
 
-       mutex_enter(&dev->l2ad_mtx);
+static inline boolean_t
+arc_buf_is_shared(arc_buf_t *buf)
+{
+       boolean_t shared = (buf->b_data != NULL &&
+           buf->b_hdr->b_l1hdr.b_pabd != NULL &&
+           abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
+           buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
+       IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
+       IMPLY(shared, ARC_BUF_SHARED(buf));
+       IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
 
        /*
-        * We must place the realloc'ed header back into the list at
-        * the same spot. Otherwise, if it's placed earlier in the list,
-        * l2arc_write_buffers() could find it during the function's
-        * write phase, and try to write it out to the l2arc.
+        * It would be nice to assert arc_can_share() too, but the "hdr isn't
+        * already being shared" requirement prevents us from doing that.
         */
-       list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
-       list_remove(&dev->l2ad_buflist, hdr);
 
-       mutex_exit(&dev->l2ad_mtx);
+       return (shared);
+}
 
-       buf_discard_identity(hdr);
-       hdr->b_freeze_cksum = NULL;
-       kmem_cache_free(old, hdr);
+/*
+ * Free the checksum associated with this header. If there is no checksum, this
+ * is a no-op.
+ */
+static inline void
+arc_cksum_free(arc_buf_hdr_t *hdr)
+{
+       ASSERT(HDR_HAS_L1HDR(hdr));
 
-       return (nhdr);
+       mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
+       if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
+               kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
+               hdr->b_l1hdr.b_freeze_cksum = NULL;
+       }
+       mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
 }
 
+/*
+ * Return true iff at least one of the bufs on hdr is not compressed.
+ * Encrypted buffers count as compressed.
+ */
+static boolean_t
+arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
+{
+       for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
+               if (!ARC_BUF_COMPRESSED(b)) {
+                       return (B_TRUE);
+               }
+       }
+       return (B_FALSE);
+}
 
-#define        ARC_MINTIME     (hz>>4) /* 62 ms */
 
+/*
+ * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
+ * matches the checksum that is stored in the hdr. If there is no checksum,
+ * or if the buf is compressed, this is a no-op.
+ */
 static void
 arc_cksum_verify(arc_buf_t *buf)
 {
+       arc_buf_hdr_t *hdr = buf->b_hdr;
        zio_cksum_t zc;
 
        if (!(zfs_flags & ZFS_DEBUG_MODIFY))
                return;
 
-       mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
-       if (buf->b_hdr->b_freeze_cksum == NULL || HDR_IO_ERROR(buf->b_hdr)) {
-               mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
+       if (ARC_BUF_COMPRESSED(buf)) {
+               ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
+                   arc_hdr_has_uncompressed_buf(hdr));
                return;
        }
-       fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
-       if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
-               panic("buffer modified while frozen!");
-       mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
-}
 
-static int
-arc_cksum_equal(arc_buf_t *buf)
-{
-       zio_cksum_t zc;
-       int equal;
+       ASSERT(HDR_HAS_L1HDR(hdr));
 
-       mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
-       fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
-       equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
-       mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
+       mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
+       if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
+               mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
+               return;
+       }
 
-       return (equal);
+       fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
+       if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
+               panic("buffer modified while frozen!");
+       mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
 }
 
-static void
-arc_cksum_compute(arc_buf_t *buf, boolean_t force)
-{
-       if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
+/*
+ * This function makes the assumption that data stored in the L2ARC
+ * will be transformed exactly as it is in the main pool. Because of
+ * this we can verify the checksum against the reading process's bp.
+ */
+static boolean_t
+arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
+{
+       ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
+       VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
+
+       /*
+        * Block pointers always store the checksum for the logical data.
+        * If the block pointer has the gang bit set, then the checksum
+        * it represents is for the reconstituted data and not for an
+        * individual gang member. The zio pipeline, however, must be able to
+        * determine the checksum of each of the gang constituents so it
+        * treats the checksum comparison differently than what we need
+        * for l2arc blocks. This prevents us from using the
+        * zio_checksum_error() interface directly. Instead we must call the
+        * zio_checksum_error_impl() so that we can ensure the checksum is
+        * generated using the correct checksum algorithm and accounts for the
+        * logical I/O size and not just a gang fragment.
+        */
+       return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
+           BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
+           zio->io_offset, NULL) == 0);
+}
+
+/*
+ * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
+ * checksum and attaches it to the buf's hdr so that we can ensure that the buf
+ * isn't modified later on. If buf is compressed or there is already a checksum
+ * on the hdr, this is a no-op (we only checksum uncompressed bufs).
+ */
+static void
+arc_cksum_compute(arc_buf_t *buf)
+{
+       arc_buf_hdr_t *hdr = buf->b_hdr;
+
+       if (!(zfs_flags & ZFS_DEBUG_MODIFY))
                return;
 
+       ASSERT(HDR_HAS_L1HDR(hdr));
+
        mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
-       if (buf->b_hdr->b_freeze_cksum != NULL) {
-               mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
+       if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
+               ASSERT(arc_hdr_has_uncompressed_buf(hdr));
+               mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
+               return;
+       } else if (ARC_BUF_COMPRESSED(buf)) {
+               mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
                return;
        }
-       buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
+
+       ASSERT(!ARC_BUF_ENCRYPTED(buf));
+       ASSERT(!ARC_BUF_COMPRESSED(buf));
+       hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
            KM_SLEEP);
-       fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
-           buf->b_hdr->b_freeze_cksum);
-       mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
+       fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
+           hdr->b_l1hdr.b_freeze_cksum);
+       mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
        arc_buf_watch(buf);
 }
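
The ZFS_DEBUG_MODIFY machinery above boils down to a freeze/verify pattern: hash the data once under the freeze lock and compare against that hash before the buffer is allowed to change again. A self-contained sketch of the pattern, with a hypothetical FNV-1a toy_cksum() standing in for fletcher_2_native():

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical buffer with a "frozen" checksum, like b_freeze_cksum. */
struct frozen_buf {
        pthread_mutex_t freeze_lock;
        bool frozen;
        uint64_t cksum;
        const unsigned char *data;
        size_t size;
};

/* Simple FNV-1a hash standing in for fletcher_2_native(). */
static uint64_t
toy_cksum(const unsigned char *p, size_t n)
{
        uint64_t h = 14695981039346656037ULL;
        for (size_t i = 0; i < n; i++)
                h = (h ^ p[i]) * 1099511628211ULL;
        return (h);
}

/* Record the checksum once, under the freeze lock. */
static void
toy_cksum_compute(struct frozen_buf *fb)
{
        pthread_mutex_lock(&fb->freeze_lock);
        if (!fb->frozen) {
                fb->cksum = toy_cksum(fb->data, fb->size);
                fb->frozen = true;
        }
        pthread_mutex_unlock(&fb->freeze_lock);
}

/* Later, verify the data has not changed while it was frozen. */
static void
toy_cksum_verify(struct frozen_buf *fb)
{
        pthread_mutex_lock(&fb->freeze_lock);
        if (fb->frozen)
                assert(fb->cksum == toy_cksum(fb->data, fb->size));
        pthread_mutex_unlock(&fb->freeze_lock);
}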
 
@@ -1136,7 +1619,7 @@ arc_cksum_compute(arc_buf_t *buf, boolean_t force)
 void
 arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
 {
-       panic("Got SIGSEGV at address: 0x%lx\n", (long) si->si_addr);
+       panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
 }
 #endif
 
@@ -1146,7 +1629,7 @@ arc_buf_unwatch(arc_buf_t *buf)
 {
 #ifndef _KERNEL
        if (arc_watch) {
-               ASSERT0(mprotect(buf->b_data, buf->b_hdr->b_size,
+               ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
                    PROT_READ | PROT_WRITE));
        }
 #endif
@@ -1158,18 +1641,28 @@ arc_buf_watch(arc_buf_t *buf)
 {
 #ifndef _KERNEL
        if (arc_watch)
-               ASSERT0(mprotect(buf->b_data, buf->b_hdr->b_size, PROT_READ));
+               ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
+                   PROT_READ));
 #endif
 }
 
 static arc_buf_contents_t
 arc_buf_type(arc_buf_hdr_t *hdr)
 {
+       arc_buf_contents_t type;
        if (HDR_ISTYPE_METADATA(hdr)) {
-               return (ARC_BUFC_METADATA);
+               type = ARC_BUFC_METADATA;
        } else {
-               return (ARC_BUFC_DATA);
+               type = ARC_BUFC_DATA;
        }
+       VERIFY3U(hdr->b_type, ==, type);
+       return (type);
+}
+
+boolean_t
+arc_is_metadata(arc_buf_t *buf)
+{
+       return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
 }
 
 static uint32_t
@@ -1191,310 +1684,1042 @@ arc_bufc_to_flags(arc_buf_contents_t type)
 void
 arc_buf_thaw(arc_buf_t *buf)
 {
-       if (zfs_flags & ZFS_DEBUG_MODIFY) {
-               if (buf->b_hdr->b_l1hdr.b_state != arc_anon)
-                       panic("modifying non-anon buffer!");
-               if (HDR_IO_IN_PROGRESS(buf->b_hdr))
-                       panic("modifying buffer while i/o in progress!");
-               arc_cksum_verify(buf);
-       }
+       arc_buf_hdr_t *hdr = buf->b_hdr;
 
-       mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
-       if (buf->b_hdr->b_freeze_cksum != NULL) {
-               kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
-               buf->b_hdr->b_freeze_cksum = NULL;
-       }
+       ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
+       ASSERT(!HDR_IO_IN_PROGRESS(hdr));
 
-       mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
+       arc_cksum_verify(buf);
+
+       /*
+        * Compressed buffers do not manipulate the b_freeze_cksum or
+        * allocate b_thawed.
+        */
+       if (ARC_BUF_COMPRESSED(buf)) {
+               ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
+                   arc_hdr_has_uncompressed_buf(hdr));
+               return;
+       }
 
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       arc_cksum_free(hdr);
        arc_buf_unwatch(buf);
 }
 
 void
 arc_buf_freeze(arc_buf_t *buf)
 {
+       arc_buf_hdr_t *hdr = buf->b_hdr;
        kmutex_t *hash_lock;
 
        if (!(zfs_flags & ZFS_DEBUG_MODIFY))
                return;
 
-       hash_lock = HDR_LOCK(buf->b_hdr);
+       if (ARC_BUF_COMPRESSED(buf)) {
+               ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
+                   arc_hdr_has_uncompressed_buf(hdr));
+               return;
+       }
+
+       hash_lock = HDR_LOCK(hdr);
        mutex_enter(hash_lock);
 
-       ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
-           buf->b_hdr->b_l1hdr.b_state == arc_anon);
-       arc_cksum_compute(buf, B_FALSE);
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL ||
+           hdr->b_l1hdr.b_state == arc_anon);
+       arc_cksum_compute(buf);
        mutex_exit(hash_lock);
-
 }
 
-static void
-add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
+/*
+ * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
+ * the following functions should be used to ensure that the flags are
+ * updated in a thread-safe way. When manipulating the flags either
+ * the hash_lock must be held or the hdr must be undiscoverable. This
+ * ensures that we're not racing with any other threads when updating
+ * the flags.
+ */
+static inline void
+arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
 {
-       arc_state_t *state;
-
-       ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT(MUTEX_HELD(hash_lock));
-
-       state = hdr->b_l1hdr.b_state;
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
+       hdr->b_flags |= flags;
+}
 
-       if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
-           (state != arc_anon)) {
-               /* We don't use the L2-only state list. */
-               if (state != arc_l2c_only) {
-                       arc_buf_contents_t type = arc_buf_type(hdr);
-                       uint64_t delta = hdr->b_size * hdr->b_l1hdr.b_datacnt;
-                       multilist_t *list = &state->arcs_list[type];
-                       uint64_t *size = &state->arcs_lsize[type];
+static inline void
+arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
+{
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
+       hdr->b_flags &= ~flags;
+}
 
-                       multilist_remove(list, hdr);
+/*
+ * Setting the compression bits in the arc_buf_hdr_t's b_flags is
+ * done in a special way since we have to clear and set bits
+ * at the same time. Consumers that wish to set the compression bits
+ * must use this function to ensure that the flags are updated in
+ * a thread-safe manner.
+ */
+static void
+arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
+{
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
 
-                       if (GHOST_STATE(state)) {
-                               ASSERT0(hdr->b_l1hdr.b_datacnt);
-                               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
-                               delta = hdr->b_size;
-                       }
-                       ASSERT(delta > 0);
-                       ASSERT3U(*size, >=, delta);
-                       atomic_add_64(size, -delta);
-               }
-               /* remove the prefetch flag if we get a reference */
-               hdr->b_flags &= ~ARC_FLAG_PREFETCH;
+       /*
+        * Holes and embedded blocks will always have a psize = 0, so we
+        * ignore the compression of the blkptr and simply mark them as
+        * uncompressed.
+        */
+       if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
+               arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
+               ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
+       } else {
+               arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
+               ASSERT(HDR_COMPRESSION_ENABLED(hdr));
        }
+
+       HDR_SET_COMPRESS(hdr, cmp);
+       ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
 }
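
The reason every b_flags update funnels through helpers like these is that a multi-bit field such as the compression algorithm must be cleared and re-set within a single critical section. A small sketch of that rule, using a hypothetical toy_hdr layout and TOY_* bit definitions rather than the real b_flags encoding:

#include <pthread.h>
#include <stdint.h>

/*
 * Hypothetical flag layout: bit 0 enables compressed ARC and bits 8..15 hold
 * the algorithm (0 meaning "off" in this toy), loosely mirroring how b_flags
 * packs its compression bits.
 */
#define TOY_FLAG_COMPRESSED_ARC (1u << 0)
#define TOY_COMPRESS_SHIFT      8
#define TOY_COMPRESS_MASK       (0xffu << TOY_COMPRESS_SHIFT)

struct toy_hdr {
        pthread_mutex_t lock;   /* stands in for the hash lock */
        uint32_t flags;
};

/*
 * Clear the old algorithm bits and set the new ones in one critical section,
 * so concurrent readers never observe a half-updated field.
 */
static void
toy_hdr_set_compress(struct toy_hdr *hdr, uint32_t alg)
{
        pthread_mutex_lock(&hdr->lock);
        hdr->flags = (hdr->flags & ~TOY_COMPRESS_MASK) |
            ((alg << TOY_COMPRESS_SHIFT) & TOY_COMPRESS_MASK);
        if (alg != 0)
                hdr->flags |= TOY_FLAG_COMPRESSED_ARC;
        else
                hdr->flags &= ~TOY_FLAG_COMPRESSED_ARC;
        pthread_mutex_unlock(&hdr->lock);
}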
 
-static int
-remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
+/*
+ * Looks for another buf on the same hdr which has the data decompressed, copies
+ * from it, and returns true. If no such buf exists, returns false.
+ */
+static boolean_t
+arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
 {
-       int cnt;
-       arc_state_t *state = hdr->b_l1hdr.b_state;
+       arc_buf_hdr_t *hdr = buf->b_hdr;
+       boolean_t copied = B_FALSE;
 
        ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
-       ASSERT(!GHOST_STATE(state));
+       ASSERT3P(buf->b_data, !=, NULL);
+       ASSERT(!ARC_BUF_COMPRESSED(buf));
+
+       for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
+           from = from->b_next) {
+               /* can't use our own data buffer */
+               if (from == buf) {
+                       continue;
+               }
+
+               if (!ARC_BUF_COMPRESSED(from)) {
+                       bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
+                       copied = B_TRUE;
+                       break;
+               }
+       }
 
        /*
-        * arc_l2c_only counts as a ghost state so we don't need to explicitly
-        * check to prevent usage of the arc_l2c_only list.
+        * There were no decompressed bufs, so there should not be a
+        * checksum on the hdr either.
         */
-       if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
-           (state != arc_anon)) {
-               arc_buf_contents_t type = arc_buf_type(hdr);
-               multilist_t *list = &state->arcs_list[type];
-               uint64_t *size = &state->arcs_lsize[type];
+       EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
 
-               multilist_insert(list, hdr);
-
-               ASSERT(hdr->b_l1hdr.b_datacnt > 0);
-               atomic_add_64(size, hdr->b_size *
-                   hdr->b_l1hdr.b_datacnt);
-       }
-       return (cnt);
+       return (copied);
 }
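
A condensed sketch of the sibling-copy strategy, using a hypothetical toy_buf list rather than the real b_buf chain:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical sibling list of buffers hanging off one header. */
struct toy_buf {
        struct toy_buf *next;
        bool compressed;
        unsigned char *data;
        size_t size;
};

/*
 * Copy from the first sibling that already holds decompressed data; if every
 * sibling is compressed, return false so the caller falls back to
 * decompressing from the header's backing buffer.
 */
static bool
toy_try_copy_decompressed(struct toy_buf *list, struct toy_buf *dst)
{
        for (struct toy_buf *from = list; from != NULL; from = from->next) {
                if (from == dst || from->compressed)
                        continue;
                memcpy(dst->data, from->data, dst->size);
                return (true);
        }
        return (false);
}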
 
 /*
- * Returns detailed information about a specific arc buffer.  When the
- * state_index argument is set the function will calculate the arc header
- * list position for its arc state.  Since this requires a linear traversal
- * callers are strongly encourage not to do this.  However, it can be helpful
- * for targeted analysis so the functionality is provided.
+ * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
  */
-void
-arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
+static uint64_t
+arc_hdr_size(arc_buf_hdr_t *hdr)
 {
-       arc_buf_hdr_t *hdr = ab->b_hdr;
-       l1arc_buf_hdr_t *l1hdr = NULL;
-       l2arc_buf_hdr_t *l2hdr = NULL;
-       arc_state_t *state = NULL;
+       uint64_t size;
 
-       if (HDR_HAS_L1HDR(hdr)) {
-               l1hdr = &hdr->b_l1hdr;
-               state = l1hdr->b_state;
+       if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
+           HDR_GET_PSIZE(hdr) > 0) {
+               size = HDR_GET_PSIZE(hdr);
+       } else {
+               ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
+               size = HDR_GET_LSIZE(hdr);
        }
-       if (HDR_HAS_L2HDR(hdr))
-               l2hdr = &hdr->b_l2hdr;
+       return (size);
+}
 
-       memset(abi, 0, sizeof (arc_buf_info_t));
-       abi->abi_flags = hdr->b_flags;
+static int
+arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
+{
+       int ret;
+       uint64_t csize;
+       uint64_t lsize = HDR_GET_LSIZE(hdr);
+       uint64_t psize = HDR_GET_PSIZE(hdr);
+       void *tmpbuf = NULL;
+       abd_t *abd = hdr->b_l1hdr.b_pabd;
 
-       if (l1hdr) {
-               abi->abi_datacnt = l1hdr->b_datacnt;
-               abi->abi_access = l1hdr->b_arc_access;
-               abi->abi_mru_hits = l1hdr->b_mru_hits;
-               abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
-               abi->abi_mfu_hits = l1hdr->b_mfu_hits;
-               abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
-               abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
+       ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr)));
+       ASSERT(HDR_AUTHENTICATED(hdr));
+       ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+
+       /*
+        * The MAC is calculated on the compressed data that is stored on disk.
+        * However, if compressed arc is disabled we will only have the
+        * decompressed data available to us now. Compress it into a temporary
+        * abd so we can verify the MAC. The performance overhead of this will
+        * be relatively low, since most objects in an encrypted objset will
+        * be encrypted (instead of authenticated) anyway.
+        */
+       if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
+           !HDR_COMPRESSION_ENABLED(hdr)) {
+               tmpbuf = zio_buf_alloc(lsize);
+               abd = abd_get_from_buf(tmpbuf, lsize);
+               abd_take_ownership_of_buf(abd, B_TRUE);
+
+               csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
+                   hdr->b_l1hdr.b_pabd, tmpbuf, lsize);
+               ASSERT3U(csize, <=, psize);
+               abd_zero_off(abd, csize, psize - csize);
        }
 
-       if (l2hdr) {
-               abi->abi_l2arc_dattr = l2hdr->b_daddr;
-               abi->abi_l2arc_asize = l2hdr->b_asize;
-               abi->abi_l2arc_compress = HDR_GET_COMPRESS(hdr);
-               abi->abi_l2arc_hits = l2hdr->b_hits;
+       /*
+        * Authentication is best effort. We authenticate whenever the key is
+        * available. If we succeed we clear ARC_FLAG_NOAUTH.
+        */
+       if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
+               ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
+               ASSERT3U(lsize, ==, psize);
+               ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
+                   psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
+       } else {
+               ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
+                   hdr->b_crypt_hdr.b_mac);
        }
 
-       abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
-       abi->abi_state_contents = arc_buf_type(hdr);
-       abi->abi_size = hdr->b_size;
+       if (ret == 0)
+               arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
+       else if (ret != ENOENT)
+               goto error;
+
+       if (tmpbuf != NULL)
+               abd_free(abd);
+
+       return (0);
+
+error:
+       if (tmpbuf != NULL)
+               abd_free(abd);
+
+       return (ret);
 }
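
The control flow above amounts to best-effort verification: a missing key (ENOENT) is tolerated, success clears the no-auth flag, and any other error is fatal. A self-contained sketch of that policy, with a stub toy_verify_mac() standing in for the keystore-backed MAC check:

#include <errno.h>
#include <stdint.h>

#define TOY_FLAG_NOAUTH (1u << 0)

/*
 * Stub standing in for the real MAC check: 0 on success, ENOENT if the key
 * is not loaded, another errno on a genuine verification failure. Here it
 * simply pretends the key is unavailable.
 */
static int
toy_verify_mac(const void *buf, uint64_t len, const uint8_t *mac)
{
        (void) buf; (void) len; (void) mac;
        return (ENOENT);
}

/*
 * Best-effort authentication: clear the NOAUTH flag on success, tolerate a
 * missing key, and propagate real failures to the caller.
 */
static int
toy_authenticate(uint32_t *flags, const void *buf, uint64_t len,
    const uint8_t *mac)
{
        int ret = toy_verify_mac(buf, len, mac);

        if (ret == 0)
                *flags &= ~TOY_FLAG_NOAUTH;
        else if (ret != ENOENT)
                return (ret);

        return (0);
}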
 
 /*
- * Move the supplied buffer to the indicated state. The hash lock
- * for the buffer must be held by the caller.
+ * This function will take a header that only has raw encrypted data in
+ * b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
+ * b_l1hdr.b_pabd. If designated in the header flags, this function will
+ * also decompress the data.
  */
-static void
-arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
-    kmutex_t *hash_lock)
+static int
+arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
 {
-       arc_state_t *old_state;
-       int64_t refcnt;
-       uint32_t datacnt;
-       uint64_t from_delta, to_delta;
-       arc_buf_contents_t buftype = arc_buf_type(hdr);
+       int ret;
+       dsl_crypto_key_t *dck = NULL;
+       abd_t *cabd = NULL;
+       void *tmp = NULL;
+       boolean_t no_crypt = B_FALSE;
+       boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
+
+       ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr)));
+       ASSERT(HDR_ENCRYPTED(hdr));
+
+       arc_hdr_alloc_abd(hdr, B_FALSE);
 
        /*
-        * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
-        * in arc_read() when bringing a buffer out of the L2ARC.  However, the
-        * L1 hdr doesn't always exist when we change state to arc_anon before
-        * destroying a header, in which case reallocating to add the L1 hdr is
-        * pointless.
+        * We must be careful to use the passed-in dsobj value here and
+        * not the value in b_dsobj. b_dsobj is meant to be a best guess for
+        * the L2ARC, which has the luxury of being able to fail without real
+        * consequences (the data simply won't make it to the L2ARC). In
+        * reality, the dsobj stored in the header may belong to a dataset
+        * that has been unmounted or otherwise disowned, meaning the key
+        * won't be accessible via that dsobj anymore.
         */
-       if (HDR_HAS_L1HDR(hdr)) {
-               old_state = hdr->b_l1hdr.b_state;
-               refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
-               datacnt = hdr->b_l1hdr.b_datacnt;
-       } else {
-               old_state = arc_l2c_only;
-               refcnt = 0;
-               datacnt = 0;
+       ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
+       if (ret != 0) {
+               ret = SET_ERROR(EACCES);
+               goto error;
        }
 
-       ASSERT(MUTEX_HELD(hash_lock));
-       ASSERT3P(new_state, !=, old_state);
-       ASSERT(refcnt == 0 || datacnt > 0);
-       ASSERT(!GHOST_STATE(new_state) || datacnt == 0);
-       ASSERT(old_state != arc_anon || datacnt <= 1);
+       ret = zio_do_crypt_abd(B_FALSE, &dck->dck_key,
+           hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_ot,
+           hdr->b_crypt_hdr.b_iv, hdr->b_crypt_hdr.b_mac,
+           HDR_GET_PSIZE(hdr), bswap, hdr->b_l1hdr.b_pabd,
+           hdr->b_crypt_hdr.b_rabd, &no_crypt);
+       if (ret != 0)
+               goto error;
 
-       from_delta = to_delta = datacnt * hdr->b_size;
+       if (no_crypt) {
+               abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
+                   HDR_GET_PSIZE(hdr));
+       }
 
        /*
-        * If this buffer is evictable, transfer it from the
-        * old state list to the new state list.
+        * If this header has disabled arc compression but the b_pabd is
+        * compressed after decrypting it, we need to decompress the newly
+        * decrypted data.
         */
-       if (refcnt == 0) {
-               if (old_state != arc_anon && old_state != arc_l2c_only) {
-                       uint64_t *size = &old_state->arcs_lsize[buftype];
-
-                       ASSERT(HDR_HAS_L1HDR(hdr));
-                       multilist_remove(&old_state->arcs_list[buftype], hdr);
-
-                       /*
-                        * If prefetching out of the ghost cache,
-                        * we will have a non-zero datacnt.
-                        */
-                       if (GHOST_STATE(old_state) && datacnt == 0) {
-                               /* ghost elements have a ghost size */
-                               ASSERT(hdr->b_l1hdr.b_buf == NULL);
-                               from_delta = hdr->b_size;
-                       }
-                       ASSERT3U(*size, >=, from_delta);
-                       atomic_add_64(size, -from_delta);
+       if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
+           !HDR_COMPRESSION_ENABLED(hdr)) {
+               /*
+                * We want to make sure that we are correctly honoring the
+                * zfs_abd_scatter_enabled setting, so we allocate an abd here
+                * and then loan a buffer from it, rather than allocating a
+                * linear buffer and wrapping it in an abd later.
+                */
+               cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
+               tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
+
+               ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
+                   hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
+                   HDR_GET_LSIZE(hdr));
+               if (ret != 0) {
+                       abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
+                       goto error;
                }
-               if (new_state != arc_anon && new_state != arc_l2c_only) {
-                       uint64_t *size = &new_state->arcs_lsize[buftype];
-
-                       /*
-                        * An L1 header always exists here, since if we're
-                        * moving to some L1-cached state (i.e. not l2c_only or
-                        * anonymous), we realloc the header to add an L1hdr
-                        * beforehand.
-                        */
-                       ASSERT(HDR_HAS_L1HDR(hdr));
-                       multilist_insert(&new_state->arcs_list[buftype], hdr);
 
-                       /* ghost elements have a ghost size */
-                       if (GHOST_STATE(new_state)) {
-                               ASSERT0(datacnt);
-                               ASSERT(hdr->b_l1hdr.b_buf == NULL);
-                               to_delta = hdr->b_size;
-                       }
-                       atomic_add_64(size, to_delta);
-               }
+               abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
+               arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
+                   arc_hdr_size(hdr), hdr);
+               hdr->b_l1hdr.b_pabd = cabd;
        }
 
-       ASSERT(!BUF_EMPTY(hdr));
-       if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
-               buf_hash_remove(hdr);
+       spa_keystore_dsl_key_rele(spa, dck, FTAG);
 
-       /* adjust state sizes (ignore arc_l2c_only) */
-       if (to_delta && new_state != arc_l2c_only)
-               atomic_add_64(&new_state->arcs_size, to_delta);
-       if (from_delta && old_state != arc_l2c_only) {
-               ASSERT3U(old_state->arcs_size, >=, from_delta);
-               atomic_add_64(&old_state->arcs_size, -from_delta);
-       }
-       if (HDR_HAS_L1HDR(hdr))
-               hdr->b_l1hdr.b_state = new_state;
+       return (0);
 
-       /*
-        * L2 headers should never be on the L2 state list since they don't
-        * have L1 headers allocated.
-        */
-       ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
-           multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
+error:
+       arc_hdr_free_abd(hdr, B_FALSE);
+       if (dck != NULL)
+               spa_keystore_dsl_key_rele(spa, dck, FTAG);
+       if (cabd != NULL)
+               arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
+
+       return (ret);
 }
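
Stripped of the abd and keystore plumbing, the function is a two-stage pipeline: decrypt into a plaintext buffer, then decompress only when the on-disk data is compressed but the cache stores it uncompressed. A self-contained sketch of that ordering, with toy XOR/zero-fill transforms standing in for the real zio routines:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Toy "decrypt": XOR with a single key byte. */
static void
toy_decrypt(const unsigned char *in, unsigned char *out, size_t n,
    unsigned char key)
{
        for (size_t i = 0; i < n; i++)
                out[i] = in[i] ^ key;
}

/* Toy "decompress": copy psize bytes and zero-fill up to lsize. */
static int
toy_decompress(const unsigned char *in, unsigned char *out, size_t psize,
    size_t lsize)
{
        if (psize > lsize)
                return (-1);
        memcpy(out, in, psize);
        memset(out + psize, 0, lsize - psize);
        return (0);
}

/*
 * Fill a plaintext buffer from raw encrypted bytes: always decrypt first,
 * and only decompress afterwards when the data is compressed on disk but
 * the cache wants it uncompressed. 'out' must hold at least lsize bytes.
 */
static int
toy_fill_plaintext(const unsigned char *raw, size_t psize, size_t lsize,
    bool compressed_on_disk, bool cache_compressed, unsigned char key,
    unsigned char *out)
{
        int ret = 0;
        unsigned char *tmp = malloc(psize);

        if (tmp == NULL)
                return (-1);
        toy_decrypt(raw, tmp, psize, key);
        if (compressed_on_disk && !cache_compressed)
                ret = toy_decompress(tmp, out, psize, lsize);
        else
                memcpy(out, tmp, psize);
        free(tmp);
        return (ret);
}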
 
-void
-arc_space_consume(uint64_t space, arc_space_type_t type)
+/*
+ * This function is called during arc_buf_fill() to prepare the header's
+ * abd plaintext pointer for use. This involves authenticating protected
+ * data and decrypting encrypted data into the plaintext abd.
+ */
+static int
+arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
+    uint64_t dsobj, boolean_t noauth)
 {
-       ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
+       int ret;
 
-       switch (type) {
-       default:
-               break;
-       case ARC_SPACE_DATA:
-               ARCSTAT_INCR(arcstat_data_size, space);
-               break;
-       case ARC_SPACE_META:
-               ARCSTAT_INCR(arcstat_meta_size, space);
-               break;
-       case ARC_SPACE_OTHER:
-               ARCSTAT_INCR(arcstat_other_size, space);
-               break;
-       case ARC_SPACE_HDRS:
-               ARCSTAT_INCR(arcstat_hdr_size, space);
-               break;
-       case ARC_SPACE_L2HDRS:
-               ARCSTAT_INCR(arcstat_l2_hdr_size, space);
-               break;
-       }
+       ASSERT(HDR_PROTECTED(hdr));
 
-       if (type != ARC_SPACE_DATA) {
-               ARCSTAT_INCR(arcstat_meta_used, space);
-               if (arc_meta_max < arc_meta_used)
-                       arc_meta_max = arc_meta_used;
+       if (hash_lock != NULL)
+               mutex_enter(hash_lock);
+
+       if (HDR_NOAUTH(hdr) && !noauth) {
+               /*
+                * The caller requested authenticated data but our data has
+                * not been authenticated yet. Verify the MAC now if we can.
+                */
+               ret = arc_hdr_authenticate(hdr, spa, dsobj);
+               if (ret != 0)
+                       goto error;
+       } else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
+               /*
+                * If we only have the encrypted version of the data, but the
+                * unencrypted version was requested we take this opportunity
+                * to store the decrypted version in the header for future use.
+                */
+               ret = arc_hdr_decrypt(hdr, spa, dsobj);
+               if (ret != 0)
+                       goto error;
        }
 
-       atomic_add_64(&arc_size, space);
-}
+       ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
 
-void
-arc_space_return(uint64_t space, arc_space_type_t type)
-{
-       ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
+       if (hash_lock != NULL)
+               mutex_exit(hash_lock);
 
-       switch (type) {
-       default:
-               break;
+       return (0);
+
+error:
+       if (hash_lock != NULL)
+               mutex_exit(hash_lock);
+
+       return (ret);
+}
+
+/*
+ * This function is used by the dbuf code to decrypt bonus buffers in place.
+ * The dbuf code itself doesn't have any locking for decrypting a shared dnode
+ * block, so we use the hash lock here to protect against concurrent calls to
+ * arc_buf_fill().
+ */
+static void
+arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock)
+{
+       arc_buf_hdr_t *hdr = buf->b_hdr;
+
+       ASSERT(HDR_ENCRYPTED(hdr));
+       ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
+       ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr)));
+       ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+
+       zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
+           arc_buf_size(buf));
+       buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
+       buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
+       hdr->b_crypt_hdr.b_ebufcnt -= 1;
+}
+
+/*
+ * Given a buf that has a data buffer attached to it, this function will
+ * efficiently fill the buf with data of the specified compression setting from
+ * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
+ * are already sharing a data buf, no copy is performed.
+ *
+ * If the buf is marked as compressed but uncompressed data was requested, this
+ * will allocate a new data buffer for the buf, remove that flag, and fill the
+ * buf with uncompressed data. You can't request a compressed buf on a hdr with
+ * uncompressed data, and (since we haven't added support for it yet) if you
+ * want compressed data your buf must already be marked as compressed and have
+ * the correct-sized data buffer.
+ */
+static int
+arc_buf_fill(arc_buf_t *buf, spa_t *spa, uint64_t dsobj, arc_fill_flags_t flags)
+{
+       int error = 0;
+       arc_buf_hdr_t *hdr = buf->b_hdr;
+       boolean_t hdr_compressed =
+           (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
+       boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
+       boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
+       dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
+       kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
+
+       ASSERT3P(buf->b_data, !=, NULL);
+       IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
+       IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
+       IMPLY(encrypted, HDR_ENCRYPTED(hdr));
+       IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
+       IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
+       IMPLY(encrypted, !ARC_BUF_SHARED(buf));
+
+       /*
+        * If the caller wanted encrypted data we just need to copy it from
+        * b_rabd and potentially byteswap it. We won't be able to do any
+        * further transforms on it.
+        */
+       if (encrypted) {
+               ASSERT(HDR_HAS_RABD(hdr));
+               abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
+                   HDR_GET_PSIZE(hdr));
+               goto byteswap;
+       }
+
+       /*
+        * Adjust encrypted and authenticated headers to accommodate the
+        * request if needed.
+        */
+       if (HDR_PROTECTED(hdr)) {
+               error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
+                   dsobj, !!(flags & ARC_FILL_NOAUTH));
+               if (error != 0)
+                       return (error);
+       }
+
+       /*
+        * There is a special case here for dnode blocks which are
+        * decrypting their bonus buffers. These blocks may request to
+        * be decrypted in-place. This is necessary because there may
+        * be many dnodes pointing into this buffer and there is
+        * currently no method to synchronize replacing the backing
+        * b_data buffer and updating all of the pointers. Here we use
+        * the hash lock to ensure there are no races. If the need
+        * arises for other types to be decrypted in-place, they must
+        * add handling here as well.
+        */
+       if ((flags & ARC_FILL_IN_PLACE) != 0) {
+               ASSERT(!hdr_compressed);
+               ASSERT(!compressed);
+               ASSERT(!encrypted);
+
+               if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
+                       ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
+
+                       if (hash_lock != NULL)
+                               mutex_enter(hash_lock);
+                       arc_buf_untransform_in_place(buf, hash_lock);
+                       if (hash_lock != NULL)
+                               mutex_exit(hash_lock);
+
+                       /* Compute the hdr's checksum if necessary */
+                       arc_cksum_compute(buf);
+               }
+
+               return (0);
+       }
+
+       if (hdr_compressed == compressed) {
+               if (!arc_buf_is_shared(buf)) {
+                       abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
+                           arc_buf_size(buf));
+               }
+       } else {
+               ASSERT(hdr_compressed);
+               ASSERT(!compressed);
+               ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
+
+               /*
+                * If the buf is sharing its data with the hdr, unlink it and
+                * allocate a new data buffer for the buf.
+                */
+               if (arc_buf_is_shared(buf)) {
+                       ASSERT(ARC_BUF_COMPRESSED(buf));
+
+                       /* We need to give the buf its own b_data */
+                       buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
+                       buf->b_data =
+                           arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
+                       arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
+
+                       /* Previously overhead was 0; just add new overhead */
+                       ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
+               } else if (ARC_BUF_COMPRESSED(buf)) {
+                       /* We need to reallocate the buf's b_data */
+                       arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
+                           buf);
+                       buf->b_data =
+                           arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
+
+                       /* We increased the size of b_data; update overhead */
+                       ARCSTAT_INCR(arcstat_overhead_size,
+                           HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
+               }
+
+               /*
+                * Regardless of the buf's previous compression settings, it
+                * should not be compressed at the end of this function.
+                */
+               buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
+
+               /*
+                * Try copying the data from another buf which already has a
+                * decompressed version. If that's not possible, it's time to
+                * bite the bullet and decompress the data from the hdr.
+                */
+               if (arc_buf_try_copy_decompressed_data(buf)) {
+                       /* Skip byteswapping and checksumming (already done) */
+                       ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL);
+                       return (0);
+               } else {
+                       error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
+                           hdr->b_l1hdr.b_pabd, buf->b_data,
+                           HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
+
+                       /*
+                        * Absent hardware errors or software bugs, this should
+                        * be impossible, but log it anyway so we can debug it.
+                        */
+                       if (error != 0) {
+                               zfs_dbgmsg(
+                                   "hdr %p, compress %d, psize %d, lsize %d",
+                                   hdr, arc_hdr_get_compress(hdr),
+                                   HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
+                               return (SET_ERROR(EIO));
+                       }
+               }
+       }
+
+byteswap:
+       /* Byteswap the buf's data if necessary */
+       if (bswap != DMU_BSWAP_NUMFUNCS) {
+               ASSERT(!HDR_SHARED_DATA(hdr));
+               ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
+               dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
+       }
+
+       /* Compute the hdr's checksum if necessary */
+       arc_cksum_compute(buf);
+
+       return (0);
+}
+
+/*
+ * If this function is being called to decrypt an encrypted buffer or verify an
+ * authenticated one, the key must be loaded and a mapping must be made
+ * available in the keystore via spa_keystore_create_mapping() or one of its
+ * callers.
+ */
+int
+arc_untransform(arc_buf_t *buf, spa_t *spa, uint64_t dsobj, boolean_t in_place)
+{
+       arc_fill_flags_t flags = 0;
+
+       if (in_place)
+               flags |= ARC_FILL_IN_PLACE;
+
+       return (arc_buf_fill(buf, spa, dsobj, flags));
+}
+
+/*
+ * Increment the amount of evictable space in the arc_state_t's refcount.
+ * We account for the space used by the hdr and the arc buf individually
+ * so that we can add and remove them from the refcount individually.
+ */
+static void
+arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
+{
+       arc_buf_contents_t type = arc_buf_type(hdr);
+       arc_buf_t *buf;
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+
+       if (GHOST_STATE(state)) {
+               ASSERT0(hdr->b_l1hdr.b_bufcnt);
+               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+               ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+               ASSERT(!HDR_HAS_RABD(hdr));
+               (void) refcount_add_many(&state->arcs_esize[type],
+                   HDR_GET_LSIZE(hdr), hdr);
+               return;
+       }
+
+       ASSERT(!GHOST_STATE(state));
+       if (hdr->b_l1hdr.b_pabd != NULL) {
+               (void) refcount_add_many(&state->arcs_esize[type],
+                   arc_hdr_size(hdr), hdr);
+       }
+       if (HDR_HAS_RABD(hdr)) {
+               (void) refcount_add_many(&state->arcs_esize[type],
+                   HDR_GET_PSIZE(hdr), hdr);
+       }
+
+       for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
+               if (arc_buf_is_shared(buf))
+                       continue;
+               (void) refcount_add_many(&state->arcs_esize[type],
+                   arc_buf_size(buf), buf);
+       }
+}
+
+/*
+ * Decrement the amount of evictable space in the arc_state_t's refcount.
+ * We account for the space used by the hdr and the arc buf individually
+ * so that we can add and remove them from the refcount individually.
+ */
+static void
+arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
+{
+       arc_buf_contents_t type = arc_buf_type(hdr);
+       arc_buf_t *buf;
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+
+       if (GHOST_STATE(state)) {
+               ASSERT0(hdr->b_l1hdr.b_bufcnt);
+               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+               ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+               ASSERT(!HDR_HAS_RABD(hdr));
+               (void) refcount_remove_many(&state->arcs_esize[type],
+                   HDR_GET_LSIZE(hdr), hdr);
+               return;
+       }
+
+       ASSERT(!GHOST_STATE(state));
+       if (hdr->b_l1hdr.b_pabd != NULL) {
+               (void) refcount_remove_many(&state->arcs_esize[type],
+                   arc_hdr_size(hdr), hdr);
+       }
+       if (HDR_HAS_RABD(hdr)) {
+               (void) refcount_remove_many(&state->arcs_esize[type],
+                   HDR_GET_PSIZE(hdr), hdr);
+       }
+
+       for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
+               if (arc_buf_is_shared(buf))
+                       continue;
+               (void) refcount_remove_many(&state->arcs_esize[type],
+                   arc_buf_size(buf), buf);
+       }
+}
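
Both helpers implement the same accounting idea: the hdr's backing buffer and each private (non-shared) buf copy contribute to the state's evictable byte count independently. A minimal sketch using a hypothetical atomic counter in place of the refcount-based arcs_esize:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical per-state accounting: total bytes that could be evicted. */
struct toy_state {
        _Atomic uint64_t evictable_bytes;
};

struct toy_hdr {
        uint64_t hdr_bytes;     /* backing buffer, like arc_hdr_size() */
        uint64_t buf_bytes;     /* sum of the non-shared buf copies */
};

/* Add the hdr's and bufs' contributions separately, as above. */
static void
toy_evictable_add(struct toy_state *st, const struct toy_hdr *hdr)
{
        atomic_fetch_add(&st->evictable_bytes, hdr->hdr_bytes);
        atomic_fetch_add(&st->evictable_bytes, hdr->buf_bytes);
}

/* Remove them again when the header becomes unevictable or leaves the state. */
static void
toy_evictable_remove(struct toy_state *st, const struct toy_hdr *hdr)
{
        atomic_fetch_sub(&st->evictable_bytes, hdr->hdr_bytes);
        atomic_fetch_sub(&st->evictable_bytes, hdr->buf_bytes);
}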
+
+/*
+ * Add a reference to this hdr indicating that someone is actively
+ * referencing that memory. When the refcount transitions from 0 to 1,
+ * we remove it from the respective arc_state_t list to indicate that
+ * it is not evictable.
+ */
+static void
+add_reference(arc_buf_hdr_t *hdr, void *tag)
+{
+       arc_state_t *state;
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       if (!MUTEX_HELD(HDR_LOCK(hdr))) {
+               ASSERT(hdr->b_l1hdr.b_state == arc_anon);
+               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+       }
+
+       state = hdr->b_l1hdr.b_state;
+
+       if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
+           (state != arc_anon)) {
+               /* We don't use the L2-only state list. */
+               if (state != arc_l2c_only) {
+                       multilist_remove(state->arcs_list[arc_buf_type(hdr)],
+                           hdr);
+                       arc_evictable_space_decrement(hdr, state);
+               }
+               /* remove the prefetch flag if we get a reference */
+               arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
+       }
+}
+
+/*
+ * Remove a reference from this hdr. When the reference count transitions
+ * from 1 to 0 and the header is not anonymous, we add it to the
+ * arc_state_t's list, making it eligible for eviction.
+ */
+static int
+remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
+{
+       int cnt;
+       arc_state_t *state = hdr->b_l1hdr.b_state;
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
+       ASSERT(!GHOST_STATE(state));
+
+       /*
+        * arc_l2c_only counts as a ghost state so we don't need to explicitly
+        * check to prevent usage of the arc_l2c_only list.
+        */
+       if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
+           (state != arc_anon)) {
+               multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
+               ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
+               arc_evictable_space_increment(hdr, state);
+       }
+       return (cnt);
+}
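
Together, add_reference() and remove_reference() tie eviction eligibility to the refcount transitions 0->1 and 1->0. A self-contained sketch of that idea, with a hypothetical singly linked evictable list guarded by a mutex:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical evictable list and refcounted node. */
struct toy_node {
        struct toy_node *next;
        uint32_t refcnt;
};

struct toy_list {
        pthread_mutex_t lock;
        struct toy_node *head;
};

static void
toy_list_remove(struct toy_list *l, struct toy_node *n)
{
        struct toy_node **pp = &l->head;
        while (*pp != NULL && *pp != n)
                pp = &(*pp)->next;
        if (*pp == n)
                *pp = n->next;
}

static void
toy_list_insert(struct toy_list *l, struct toy_node *n)
{
        n->next = l->head;
        l->head = n;
}

/* 0 -> 1: the node becomes pinned and leaves the evictable list. */
static void
toy_add_reference(struct toy_list *l, struct toy_node *n)
{
        pthread_mutex_lock(&l->lock);
        if (n->refcnt++ == 0)
                toy_list_remove(l, n);
        pthread_mutex_unlock(&l->lock);
}

/* 1 -> 0: the node becomes evictable again and rejoins the list. */
static void
toy_remove_reference(struct toy_list *l, struct toy_node *n)
{
        pthread_mutex_lock(&l->lock);
        if (--n->refcnt == 0)
                toy_list_insert(l, n);
        pthread_mutex_unlock(&l->lock);
}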
+
+/*
+ * Returns detailed information about a specific arc buffer.  When the
+ * state_index argument is set the function will calculate the arc header
+ * list position for its arc state.  Since this requires a linear traversal
+ * callers are strongly encouraged not to do this.  However, it can be helpful
+ * for targeted analysis so the functionality is provided.
+ */
+void
+arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
+{
+       arc_buf_hdr_t *hdr = ab->b_hdr;
+       l1arc_buf_hdr_t *l1hdr = NULL;
+       l2arc_buf_hdr_t *l2hdr = NULL;
+       arc_state_t *state = NULL;
+
+       memset(abi, 0, sizeof (arc_buf_info_t));
+
+       if (hdr == NULL)
+               return;
+
+       abi->abi_flags = hdr->b_flags;
+
+       if (HDR_HAS_L1HDR(hdr)) {
+               l1hdr = &hdr->b_l1hdr;
+               state = l1hdr->b_state;
+       }
+       if (HDR_HAS_L2HDR(hdr))
+               l2hdr = &hdr->b_l2hdr;
+
+       if (l1hdr) {
+               abi->abi_bufcnt = l1hdr->b_bufcnt;
+               abi->abi_access = l1hdr->b_arc_access;
+               abi->abi_mru_hits = l1hdr->b_mru_hits;
+               abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
+               abi->abi_mfu_hits = l1hdr->b_mfu_hits;
+               abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
+               abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
+       }
+
+       if (l2hdr) {
+               abi->abi_l2arc_dattr = l2hdr->b_daddr;
+               abi->abi_l2arc_hits = l2hdr->b_hits;
+       }
+
+       abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
+       abi->abi_state_contents = arc_buf_type(hdr);
+       abi->abi_size = arc_hdr_size(hdr);
+}
+
+/*
+ * Move the supplied buffer to the indicated state. The hash lock
+ * for the buffer must be held by the caller.
+ */
+static void
+arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
+    kmutex_t *hash_lock)
+{
+       arc_state_t *old_state;
+       int64_t refcnt;
+       uint32_t bufcnt;
+       boolean_t update_old, update_new;
+       arc_buf_contents_t buftype = arc_buf_type(hdr);
+
+       /*
+        * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
+        * in arc_read() when bringing a buffer out of the L2ARC.  However, the
+        * L1 hdr doesn't always exist when we change state to arc_anon before
+        * destroying a header, in which case reallocating to add the L1 hdr is
+        * pointless.
+        */
+       if (HDR_HAS_L1HDR(hdr)) {
+               old_state = hdr->b_l1hdr.b_state;
+               refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
+               bufcnt = hdr->b_l1hdr.b_bufcnt;
+               update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
+                   HDR_HAS_RABD(hdr));
+       } else {
+               old_state = arc_l2c_only;
+               refcnt = 0;
+               bufcnt = 0;
+               update_old = B_FALSE;
+       }
+       update_new = update_old;
+
+       ASSERT(MUTEX_HELD(hash_lock));
+       ASSERT3P(new_state, !=, old_state);
+       ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
+       ASSERT(old_state != arc_anon || bufcnt <= 1);
+
+       /*
+        * If this buffer is evictable, transfer it from the
+        * old state list to the new state list.
+        */
+       if (refcnt == 0) {
+               if (old_state != arc_anon && old_state != arc_l2c_only) {
+                       ASSERT(HDR_HAS_L1HDR(hdr));
+                       multilist_remove(old_state->arcs_list[buftype], hdr);
+
+                       if (GHOST_STATE(old_state)) {
+                               ASSERT0(bufcnt);
+                               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+                               update_old = B_TRUE;
+                       }
+                       arc_evictable_space_decrement(hdr, old_state);
+               }
+               if (new_state != arc_anon && new_state != arc_l2c_only) {
+                       /*
+                        * An L1 header always exists here, since if we're
+                        * moving to some L1-cached state (i.e. not l2c_only or
+                        * anonymous), we realloc the header to add an L1hdr
+                        * beforehand.
+                        */
+                       ASSERT(HDR_HAS_L1HDR(hdr));
+                       multilist_insert(new_state->arcs_list[buftype], hdr);
+
+                       if (GHOST_STATE(new_state)) {
+                               ASSERT0(bufcnt);
+                               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+                               update_new = B_TRUE;
+                       }
+                       arc_evictable_space_increment(hdr, new_state);
+               }
+       }
+
+       ASSERT(!HDR_EMPTY(hdr));
+       if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
+               buf_hash_remove(hdr);
+
+       /* adjust state sizes (ignore arc_l2c_only) */
+
+       if (update_new && new_state != arc_l2c_only) {
+               ASSERT(HDR_HAS_L1HDR(hdr));
+               if (GHOST_STATE(new_state)) {
+                       ASSERT0(bufcnt);
+
+                       /*
+                        * When moving a header to a ghost state, we first
+                        * remove all arc buffers. Thus, we'll have a
+                        * bufcnt of zero, and no arc buffer to use for
+                        * the reference. As a result, we use the arc
+                        * header pointer for the reference.
+                        */
+                       (void) refcount_add_many(&new_state->arcs_size,
+                           HDR_GET_LSIZE(hdr), hdr);
+                       ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+                       ASSERT(!HDR_HAS_RABD(hdr));
+               } else {
+                       arc_buf_t *buf;
+                       uint32_t buffers = 0;
+
+                       /*
+                        * Each individual buffer holds a unique reference,
+                        * thus we must remove each of these references one
+                        * at a time.
+                        */
+                       for (buf = hdr->b_l1hdr.b_buf; buf != NULL;
+                           buf = buf->b_next) {
+                               ASSERT3U(bufcnt, !=, 0);
+                               buffers++;
+
+                               /*
+                                * When the arc_buf_t is sharing the data
+                                * block with the hdr, the owner of the
+                                * reference belongs to the hdr. Only
+                                * add to the refcount if the arc_buf_t is
+                                * not shared.
+                                */
+                               if (arc_buf_is_shared(buf))
+                                       continue;
+
+                               (void) refcount_add_many(&new_state->arcs_size,
+                                   arc_buf_size(buf), buf);
+                       }
+                       ASSERT3U(bufcnt, ==, buffers);
+
+                       if (hdr->b_l1hdr.b_pabd != NULL) {
+                               (void) refcount_add_many(&new_state->arcs_size,
+                                   arc_hdr_size(hdr), hdr);
+                       }
+
+                       if (HDR_HAS_RABD(hdr)) {
+                               (void) refcount_add_many(&new_state->arcs_size,
+                                   HDR_GET_PSIZE(hdr), hdr);
+                       }
+               }
+       }
+
+       if (update_old && old_state != arc_l2c_only) {
+               ASSERT(HDR_HAS_L1HDR(hdr));
+               if (GHOST_STATE(old_state)) {
+                       ASSERT0(bufcnt);
+                       ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+                       ASSERT(!HDR_HAS_RABD(hdr));
+
+                       /*
+                        * When moving a header off of a ghost state,
+                        * the header will not contain any arc buffers.
+                        * We use the arc header pointer for the reference
+                        * which is exactly what we did when we put the
+                        * header on the ghost state.
+                        */
+
+                       (void) refcount_remove_many(&old_state->arcs_size,
+                           HDR_GET_LSIZE(hdr), hdr);
+               } else {
+                       arc_buf_t *buf;
+                       uint32_t buffers = 0;
+
+                       /*
+                        * Each individual buffer holds a unique reference,
+                        * thus we must remove each of these references one
+                        * at a time.
+                        */
+                       for (buf = hdr->b_l1hdr.b_buf; buf != NULL;
+                           buf = buf->b_next) {
+                               ASSERT3U(bufcnt, !=, 0);
+                               buffers++;
+
+                               /*
+                                * When the arc_buf_t is sharing the data
+                                * block with the hdr, the owner of the
+                                * reference belongs to the hdr. Only
+                                * remove from the refcount if the arc_buf_t
+                                * is not shared.
+                                */
+                               if (arc_buf_is_shared(buf))
+                                       continue;
+
+                               (void) refcount_remove_many(
+                                   &old_state->arcs_size, arc_buf_size(buf),
+                                   buf);
+                       }
+                       ASSERT3U(bufcnt, ==, buffers);
+                       ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
+                           HDR_HAS_RABD(hdr));
+
+                       if (hdr->b_l1hdr.b_pabd != NULL) {
+                               (void) refcount_remove_many(
+                                   &old_state->arcs_size, arc_hdr_size(hdr),
+                                   hdr);
+                       }
+
+                       if (HDR_HAS_RABD(hdr)) {
+                               (void) refcount_remove_many(
+                                   &old_state->arcs_size, HDR_GET_PSIZE(hdr),
+                                   hdr);
+                       }
+               }
+       }
+
+       if (HDR_HAS_L1HDR(hdr))
+               hdr->b_l1hdr.b_state = new_state;
+
+       /*
+        * L2 headers should never be on the L2 state list since they don't
+        * have L1 headers allocated.
+        */
+       ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
+           multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
+}
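
At its core the state change transfers the bytes a header accounts for from the old state's size to the new state's, with ghost states charged only the logical size because they hold no data. A compact sketch of that transfer, with hypothetical toy_state counters in place of the refcount-based arcs_size:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-state byte counter, standing in for arcs_size. */
struct toy_state {
        _Atomic uint64_t size;
};

/*
 * Move the header's accounted bytes from the old state to the new one.
 * Ghost states hold no data, so they are charged only the logical size.
 */
static void
toy_change_state(struct toy_state *old_state, struct toy_state *new_state,
    uint64_t data_bytes, uint64_t lsize, bool from_ghost, bool to_ghost)
{
        uint64_t old_bytes = from_ghost ? lsize : data_bytes;
        uint64_t new_bytes = to_ghost ? lsize : data_bytes;

        atomic_fetch_add(&new_state->size, new_bytes);
        atomic_fetch_sub(&old_state->size, old_bytes);
}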
+
+void
+arc_space_consume(uint64_t space, arc_space_type_t type)
+{
+       ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
+
+       switch (type) {
+       default:
+               break;
+       case ARC_SPACE_DATA:
+               ARCSTAT_INCR(arcstat_data_size, space);
+               break;
+       case ARC_SPACE_META:
+               ARCSTAT_INCR(arcstat_metadata_size, space);
+               break;
+       case ARC_SPACE_BONUS:
+               ARCSTAT_INCR(arcstat_bonus_size, space);
+               break;
+       case ARC_SPACE_DNODE:
+               ARCSTAT_INCR(arcstat_dnode_size, space);
+               break;
+       case ARC_SPACE_DBUF:
+               ARCSTAT_INCR(arcstat_dbuf_size, space);
+               break;
+       case ARC_SPACE_HDRS:
+               ARCSTAT_INCR(arcstat_hdr_size, space);
+               break;
+       case ARC_SPACE_L2HDRS:
+               ARCSTAT_INCR(arcstat_l2_hdr_size, space);
+               break;
+       }
+
+       if (type != ARC_SPACE_DATA)
+               ARCSTAT_INCR(arcstat_meta_used, space);
+
+       atomic_add_64(&arc_size, space);
+}
+
+void
+arc_space_return(uint64_t space, arc_space_type_t type)
+{
+       ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
+
+       switch (type) {
+       default:
+               break;
        case ARC_SPACE_DATA:
                ARCSTAT_INCR(arcstat_data_size, -space);
                break;
        case ARC_SPACE_META:
-               ARCSTAT_INCR(arcstat_meta_size, -space);
+               ARCSTAT_INCR(arcstat_metadata_size, -space);
+               break;
+       case ARC_SPACE_BONUS:
+               ARCSTAT_INCR(arcstat_bonus_size, -space);
                break;
-       case ARC_SPACE_OTHER:
-               ARCSTAT_INCR(arcstat_other_size, -space);
+       case ARC_SPACE_DNODE:
+               ARCSTAT_INCR(arcstat_dnode_size, -space);
+               break;
+       case ARC_SPACE_DBUF:
+               ARCSTAT_INCR(arcstat_dbuf_size, -space);
                break;
        case ARC_SPACE_HDRS:
                ARCSTAT_INCR(arcstat_hdr_size, -space);
@@ -1506,6 +2731,8 @@ arc_space_return(uint64_t space, arc_space_type_t type)
 
        if (type != ARC_SPACE_DATA) {
                ASSERT(arc_meta_used >= space);
+               if (arc_meta_max < arc_meta_used)
+                       arc_meta_max = arc_meta_used;
                ARCSTAT_INCR(arcstat_meta_used, -space);
        }
 
@@ -1513,49 +2740,161 @@ arc_space_return(uint64_t space, arc_space_type_t type)
        atomic_add_64(&arc_size, -space);
 }
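
The consume/return pair simply maintains a per-type counter plus the meta and total aggregates. A minimal sketch with hypothetical TOY_* space classes and C11 atomics in place of the ARCSTAT counters:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical space classes, loosely mirroring arc_space_type_t. */
enum toy_space { TOY_DATA, TOY_META, TOY_BONUS, TOY_DNODE, TOY_NUMTYPES };

static _Atomic uint64_t toy_space_by_type[TOY_NUMTYPES];
static _Atomic uint64_t toy_meta_used;
static _Atomic uint64_t toy_total_size;

/* Everything except plain data also counts against the metadata aggregate. */
static void
toy_space_consume(uint64_t space, enum toy_space type)
{
        atomic_fetch_add(&toy_space_by_type[type], space);
        if (type != TOY_DATA)
                atomic_fetch_add(&toy_meta_used, space);
        atomic_fetch_add(&toy_total_size, space);
}

static void
toy_space_return(uint64_t space, enum toy_space type)
{
        atomic_fetch_sub(&toy_space_by_type[type], space);
        if (type != TOY_DATA)
                atomic_fetch_sub(&toy_meta_used, space);
        atomic_fetch_sub(&toy_total_size, space);
}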
 
-arc_buf_t *
-arc_buf_alloc(spa_t *spa, uint64_t size, void *tag, arc_buf_contents_t type)
+/*
+ * Given a hdr and a buf, returns whether that buf can share its b_data buffer
+ * with the hdr's b_pabd.
+ */
+static boolean_t
+arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
+{
+       /*
+        * The criteria for sharing a hdr's data are:
+        * 1. the buffer is not encrypted
+        * 2. the hdr's compression matches the buf's compression
+        * 3. the hdr doesn't need to be byteswapped
+        * 4. the hdr isn't already being shared
+        * 5. the buf is either compressed or it is the last buf in the hdr list
+        *
+        * Criterion #5 maintains the invariant that shared uncompressed
+        * bufs must be the final buf in the hdr's b_buf list. Reading this, you
+        * might ask, "if a compressed buf is allocated first, won't that be the
+        * last thing in the list?", but in that case it's impossible to create
+        * a shared uncompressed buf anyway (because the hdr must be compressed
+        * to have the compressed buf). You might also think that #3 is
+        * sufficient to make this guarantee; however, it's possible
+        * (specifically in the rare L2ARC write race mentioned in
+        * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
+        * is sharable, but wasn't at the time of its allocation. Rather than
+        * allow a new shared uncompressed buf to be created and then shuffle
+        * the list around to make it the last element, this simply disallows
+        * sharing if the new buf isn't the first to be added.
+        */
+       ASSERT3P(buf->b_hdr, ==, hdr);
+       boolean_t hdr_compressed =
+           arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
+       boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
+       return (!ARC_BUF_ENCRYPTED(buf) &&
+           buf_compressed == hdr_compressed &&
+           hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
+           !HDR_SHARED_DATA(hdr) &&
+           (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
+}
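
For readers who find the five criteria easier to follow as a pure predicate, here is a toy restatement over hypothetical structs. The field names are invented for illustration and do not correspond to the real arc_buf_hdr_t/arc_buf_t layout; this is a sketch standing outside the patch.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	bool hdr_compressed;		/* hdr stores compressed data */
	bool hdr_needs_bswap;		/* byteswap still pending */
	bool hdr_already_shared;	/* some buf already shares b_data */
} toy_hdr_t;

typedef struct {
	bool buf_encrypted;
	bool buf_compressed;
	bool buf_is_last;		/* last buf on the hdr's list */
} toy_buf_t;

static bool
toy_can_share(const toy_hdr_t *hdr, const toy_buf_t *buf)
{
	return (!buf->buf_encrypted &&				/* criterion 1 */
	    buf->buf_compressed == hdr->hdr_compressed &&	/* criterion 2 */
	    !hdr->hdr_needs_bswap &&				/* criterion 3 */
	    !hdr->hdr_already_shared &&				/* criterion 4 */
	    (buf->buf_is_last || buf->buf_compressed));		/* criterion 5 */
}

int
main(void)
{
	toy_hdr_t hdr = { .hdr_compressed = false };
	toy_buf_t last_plain = { .buf_is_last = true };
	toy_buf_t extra_plain = { .buf_is_last = false };

	assert(toy_can_share(&hdr, &last_plain));	/* last uncompressed buf */
	assert(!toy_can_share(&hdr, &extra_plain));	/* violates criterion 5 */
	printf("toy sharing criteria hold\n");
	return (0);
}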
+
+/*
+ * Allocate a buf for this hdr. If you care about the data that's in the hdr,
+ * or if you want a compressed buffer, pass those flags in. Returns 0 if the
+ * copy was made successfully, or an error code otherwise.
+ */
+static int
+arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj, void *tag,
+    boolean_t encrypted, boolean_t compressed, boolean_t noauth,
+    boolean_t fill, arc_buf_t **ret)
 {
-       arc_buf_hdr_t *hdr;
        arc_buf_t *buf;
+       arc_fill_flags_t flags = ARC_FILL_LOCKED;
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
+       VERIFY(hdr->b_type == ARC_BUFC_DATA ||
+           hdr->b_type == ARC_BUFC_METADATA);
+       ASSERT3P(ret, !=, NULL);
+       ASSERT3P(*ret, ==, NULL);
+       IMPLY(encrypted, compressed);
 
-       VERIFY3U(size, <=, spa_maxblocksize(spa));
-       hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
-       ASSERT(BUF_EMPTY(hdr));
-       ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
-       hdr->b_size = size;
-       hdr->b_spa = spa_load_guid(spa);
        hdr->b_l1hdr.b_mru_hits = 0;
        hdr->b_l1hdr.b_mru_ghost_hits = 0;
        hdr->b_l1hdr.b_mfu_hits = 0;
        hdr->b_l1hdr.b_mfu_ghost_hits = 0;
        hdr->b_l1hdr.b_l2_hits = 0;
 
-       buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
+       buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
        buf->b_hdr = hdr;
        buf->b_data = NULL;
-       buf->b_efunc = NULL;
-       buf->b_private = NULL;
-       buf->b_next = NULL;
+       buf->b_next = hdr->b_l1hdr.b_buf;
+       buf->b_flags = 0;
 
-       hdr->b_flags = arc_bufc_to_flags(type);
-       hdr->b_flags |= ARC_FLAG_HAS_L1HDR;
+       add_reference(hdr, tag);
 
-       hdr->b_l1hdr.b_buf = buf;
-       hdr->b_l1hdr.b_state = arc_anon;
-       hdr->b_l1hdr.b_arc_access = 0;
-       hdr->b_l1hdr.b_datacnt = 1;
-       hdr->b_l1hdr.b_tmp_cdata = NULL;
+       /*
+        * We're about to change the hdr's b_flags. We must either
+        * hold the hash_lock or be undiscoverable.
+        */
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
 
-       arc_get_data_buf(buf);
+       /*
+        * Only honor requests for compressed bufs if the hdr is actually
+        * compressed. This must be overridden if the buffer is encrypted since
+        * encrypted buffers cannot be decompressed.
+        */
+       if (encrypted) {
+               buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
+               buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
+               flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
+       } else if (compressed &&
+           arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
+               buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
+               flags |= ARC_FILL_COMPRESSED;
+       }
 
-       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-       (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
+       if (noauth) {
+               ASSERT0(encrypted);
+               flags |= ARC_FILL_NOAUTH;
+       }
 
-       return (buf);
+       /*
+        * If the hdr's data can be shared then we share the data buffer and
+        * set the appropriate bit in the hdr's b_flags to indicate that the
+        * hdr is sharing its b_data with the arc_buf_t. Otherwise, we allocate
+        * a new buffer to store the buf's data.
+        *
+        * There are two additional restrictions here because we're sharing
+        * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
+        * actively involved in an L2ARC write, because if this buf is used by
+        * an arc_write() then the hdr's data buffer will be released when the
+        * write completes, even though the L2ARC write might still be using it.
+        * Second, the hdr's ABD must be linear so that the buf's user doesn't
+        * need to be ABD-aware.
+        */
+       boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
+           hdr->b_l1hdr.b_pabd != NULL && abd_is_linear(hdr->b_l1hdr.b_pabd);
+
+       /* Set up b_data and sharing */
+       if (can_share) {
+               buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
+               buf->b_flags |= ARC_BUF_FLAG_SHARED;
+               arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
+       } else {
+               buf->b_data =
+                   arc_get_data_buf(hdr, arc_buf_size(buf), buf);
+               ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
+       }
+       VERIFY3P(buf->b_data, !=, NULL);
+
+       hdr->b_l1hdr.b_buf = buf;
+       hdr->b_l1hdr.b_bufcnt += 1;
+       if (encrypted)
+               hdr->b_crypt_hdr.b_ebufcnt += 1;
+
+       /*
+        * If the user wants the data from the hdr, we need to either copy or
+        * decompress the data.
+        */
+       if (fill) {
+               return (arc_buf_fill(buf, spa, dsobj, flags));
+       }
+
+       return (0);
 }
 
-static char *arc_onloan_tag = "onloan";
+static char *arc_onloan_tag = "onloan";
+
+static inline void
+arc_loaned_bytes_update(int64_t delta)
+{
+       atomic_add_64(&arc_loaned_bytes, delta);
+
+       /* assert that it did not wrap around */
+       ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
+}
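
The helper above is just a signed-delta counter with a non-negative invariant. A minimal userland sketch of the same idea, using C11 atomics and an invented name (it assumes nothing about the kernel's atomic_add_64_nv):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t loaned_bytes;

static void
loaned_bytes_update(int64_t delta)
{
	/* new value = old value + delta; it must never go negative */
	int64_t now = atomic_fetch_add(&loaned_bytes, delta) + delta;
	assert(now >= 0);
}

int
main(void)
{
	loaned_bytes_update(4096);	/* loan out a 4K buffer */
	loaned_bytes_update(-4096);	/* return it */
	printf("outstanding loaned bytes: %lld\n",
	    (long long)atomic_load(&loaned_bytes));
	return (0);
}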
 
 /*
  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
@@ -1564,16 +2903,42 @@ static char *arc_onloan_tag = "onloan";
  * freed.
  */
 arc_buf_t *
-arc_loan_buf(spa_t *spa, uint64_t size)
+arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
 {
-       arc_buf_t *buf;
+       arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
+           is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
+
+       arc_loaned_bytes_update(size);
+
+       return (buf);
+}
+
+arc_buf_t *
+arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
+    enum zio_compress compression_type)
+{
+       arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
+           psize, lsize, compression_type);
+
+       arc_loaned_bytes_update(psize);
+
+       return (buf);
+}
 
-       buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
+arc_buf_t *
+arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
+    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
+    dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
+    enum zio_compress compression_type)
+{
+       arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
+           byteorder, salt, iv, mac, ot, psize, lsize, compression_type);
 
-       atomic_add_64(&arc_loaned_bytes, size);
+       atomic_add_64(&arc_loaned_bytes, psize);
        return (buf);
 }
 
+
 /*
  * Return a loaned arc buffer to the arc.
  */
@@ -1582,12 +2947,12 @@ arc_return_buf(arc_buf_t *buf, void *tag)
 {
        arc_buf_hdr_t *hdr = buf->b_hdr;
 
-       ASSERT(buf->b_data != NULL);
+       ASSERT3P(buf->b_data, !=, NULL);
        ASSERT(HDR_HAS_L1HDR(hdr));
        (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
        (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
 
-       atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
+       arc_loaned_bytes_update(-arc_buf_size(buf));
 }
 
 /* Detach an arc_buf from a dbuf (tag) */
@@ -1596,244 +2961,711 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
 {
        arc_buf_hdr_t *hdr = buf->b_hdr;
 
-       ASSERT(buf->b_data != NULL);
+       ASSERT3P(buf->b_data, !=, NULL);
        ASSERT(HDR_HAS_L1HDR(hdr));
        (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
        (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
-       buf->b_efunc = NULL;
-       buf->b_private = NULL;
 
-       atomic_add_64(&arc_loaned_bytes, hdr->b_size);
+       arc_loaned_bytes_update(arc_buf_size(buf));
 }
 
-static arc_buf_t *
-arc_buf_clone(arc_buf_t *from)
+static void
+l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
 {
-       arc_buf_t *buf;
-       arc_buf_hdr_t *hdr = from->b_hdr;
-       uint64_t size = hdr->b_size;
+       l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
+
+       df->l2df_abd = abd;
+       df->l2df_size = size;
+       df->l2df_type = type;
+       mutex_enter(&l2arc_free_on_write_mtx);
+       list_insert_head(l2arc_free_on_write, df);
+       mutex_exit(&l2arc_free_on_write_mtx);
+}
+
+static void
+arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
+{
+       arc_state_t *state = hdr->b_l1hdr.b_state;
+       arc_buf_contents_t type = arc_buf_type(hdr);
+       uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
+
+       /* protected by hash lock, if in the hash table */
+       if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
+               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT(state != arc_anon && state != arc_l2c_only);
+
+               (void) refcount_remove_many(&state->arcs_esize[type],
+                   size, hdr);
+       }
+       (void) refcount_remove_many(&state->arcs_size, size, hdr);
+       if (type == ARC_BUFC_METADATA) {
+               arc_space_return(size, ARC_SPACE_META);
+       } else {
+               ASSERT(type == ARC_BUFC_DATA);
+               arc_space_return(size, ARC_SPACE_DATA);
+       }
+
+       if (free_rdata) {
+               l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
+       } else {
+               l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
+       }
+}
+
+/*
+ * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
+ * data buffer, we transfer the refcount ownership to the hdr and update
+ * the appropriate kstats.
+ */
+static void
+arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
+{
+       ASSERT(arc_can_share(hdr, buf));
+       ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+       ASSERT(!ARC_BUF_ENCRYPTED(buf));
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
+
+       /*
+        * Start sharing the data buffer. We transfer the
+        * refcount ownership to the hdr since it always owns
+        * the refcount whenever an arc_buf_t is shared.
+        */
+       refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf, hdr);
+       hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
+       abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
+           HDR_ISTYPE_METADATA(hdr));
+       arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
+       buf->b_flags |= ARC_BUF_FLAG_SHARED;
+
+       /*
+        * Since we've transferred ownership to the hdr we need
+        * to increment its compressed and uncompressed kstats and
+        * decrement the overhead size.
+        */
+       ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
+       ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
+       ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
+}
+
+static void
+arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
+{
+       ASSERT(arc_buf_is_shared(buf));
+       ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
+
+       /*
+        * We are no longer sharing this buffer so we need
+        * to transfer its ownership to the rightful owner.
+        */
+       refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr, buf);
+       arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
+       abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
+       abd_put(hdr->b_l1hdr.b_pabd);
+       hdr->b_l1hdr.b_pabd = NULL;
+       buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
+
+       /*
+        * Since the buffer is no longer shared between
+        * the arc buf and the hdr, count it as overhead.
+        */
+       ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
+       ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
+       ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
+}
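
Taken together, arc_share_buf() and arc_unshare_buf() keep three size kstats in balance: shared bytes count against the hdr-owned compressed/uncompressed totals, while unshared bytes count as per-buf overhead. The toy model below (hypothetical names, not the ARCSTAT macros) shows that the two operations are exact mirrors, so a share/unshare round trip is accounting-neutral; it is a sketch, not the kernel code.

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct {
	int64_t compressed_size;
	int64_t uncompressed_size;
	int64_t overhead_size;
} toy_stats_t;

static void
toy_on_share(toy_stats_t *st, int64_t hdr_size, int64_t lsize, int64_t buf_size)
{
	/* the hdr now owns the bytes: they stop counting as per-buf overhead */
	st->compressed_size += hdr_size;
	st->uncompressed_size += lsize;
	st->overhead_size -= buf_size;
}

static void
toy_on_unshare(toy_stats_t *st, int64_t hdr_size, int64_t lsize, int64_t buf_size)
{
	/* exact mirror of toy_on_share */
	st->compressed_size -= hdr_size;
	st->uncompressed_size -= lsize;
	st->overhead_size += buf_size;
}

int
main(void)
{
	toy_stats_t st = { 0, 0, 131072 };	/* one 128K unshared buf */
	toy_stats_t before = st;

	toy_on_share(&st, 131072, 131072, 131072);
	toy_on_unshare(&st, 131072, 131072, 131072);
	assert(memcmp(&st, &before, sizeof (st)) == 0);	/* round trip is a no-op */
	return (0);
}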
 
+/*
+ * Remove an arc_buf_t from the hdr's buf list and return the last
+ * arc_buf_t on the list. If no buffers remain on the list then return
+ * NULL.
+ */
+static arc_buf_t *
+arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
+{
        ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT(hdr->b_l1hdr.b_state != arc_anon);
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
 
-       buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
-       buf->b_hdr = hdr;
-       buf->b_data = NULL;
-       buf->b_efunc = NULL;
-       buf->b_private = NULL;
-       buf->b_next = hdr->b_l1hdr.b_buf;
-       hdr->b_l1hdr.b_buf = buf;
-       arc_get_data_buf(buf);
-       bcopy(from->b_data, buf->b_data, size);
+       arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
+       arc_buf_t *lastbuf = NULL;
 
        /*
-        * This buffer already exists in the arc so create a duplicate
-        * copy for the caller.  If the buffer is associated with user data
-        * then track the size and number of duplicates.  These stats will be
-        * updated as duplicate buffers are created and destroyed.
+        * Remove the buf from the hdr list and locate the last
+        * remaining buffer on the list.
         */
-       if (HDR_ISTYPE_DATA(hdr)) {
-               ARCSTAT_BUMP(arcstat_duplicate_buffers);
-               ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
+       while (*bufp != NULL) {
+               if (*bufp == buf)
+                       *bufp = buf->b_next;
+
+               /*
+                * If we've removed a buffer in the middle of
+                * the list then update the lastbuf and update
+                * bufp.
+                */
+               if (*bufp != NULL) {
+                       lastbuf = *bufp;
+                       bufp = &(*bufp)->b_next;
+               }
        }
-       hdr->b_l1hdr.b_datacnt += 1;
-       return (buf);
+       buf->b_next = NULL;
+       ASSERT3P(lastbuf, !=, buf);
+       IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
+       IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
+       IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
+
+       return (lastbuf);
 }
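
The loop above unlinks the target in a single pass while remembering the last surviving element, which the caller may later re-share with the hdr. Below is a standalone sketch of the same pattern over a toy singly linked list; the types and names are invented for illustration and are not the kernel code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node {
	struct node *next;
	int id;
} node_t;

static node_t *
list_remove_track_last(node_t **headp, node_t *target)
{
	node_t **np = headp;
	node_t *last = NULL;

	while (*np != NULL) {
		if (*np == target)
			*np = target->next;	/* unlink, don't advance */
		if (*np != NULL) {		/* whatever sits here now survives */
			last = *np;
			np = &(*np)->next;
		}
	}
	target->next = NULL;
	return (last);				/* NULL if the list is now empty */
}

int
main(void)
{
	node_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	node_t *head = &a;

	node_t *last = list_remove_track_last(&head, &b);
	assert(head == &a && a.next == &c && last == &c);
	printf("last remaining id = %d\n", last->id);
	return (0);
}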
 
-void
-arc_buf_add_ref(arc_buf_t *buf, void* tag)
+/*
+ * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
+ * list and free it.
+ */
+static void
+arc_buf_destroy_impl(arc_buf_t *buf)
 {
-       arc_buf_hdr_t *hdr;
-       kmutex_t *hash_lock;
+       arc_buf_hdr_t *hdr = buf->b_hdr;
 
        /*
-        * Check to see if this buffer is evicted.  Callers
-        * must verify b_data != NULL to know if the add_ref
-        * was successful.
+        * Free up the data associated with the buf but only if we're not
+        * sharing this with the hdr. If we are sharing it with the hdr, the
+        * hdr is responsible for doing the free.
         */
-       mutex_enter(&buf->b_evict_lock);
-       if (buf->b_data == NULL) {
-               mutex_exit(&buf->b_evict_lock);
-               return;
+       if (buf->b_data != NULL) {
+               /*
+                * We're about to change the hdr's b_flags. We must either
+                * hold the hash_lock or be undiscoverable.
+                */
+               ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
+
+               arc_cksum_verify(buf);
+               arc_buf_unwatch(buf);
+
+               if (arc_buf_is_shared(buf)) {
+                       arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
+               } else {
+                       uint64_t size = arc_buf_size(buf);
+                       arc_free_data_buf(hdr, buf->b_data, size, buf);
+                       ARCSTAT_INCR(arcstat_overhead_size, -size);
+               }
+               buf->b_data = NULL;
+
+               ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
+               hdr->b_l1hdr.b_bufcnt -= 1;
+
+               if (ARC_BUF_ENCRYPTED(buf))
+                       hdr->b_crypt_hdr.b_ebufcnt -= 1;
+
+               /*
+                * if we have no more encrypted buffers and we've already
+                * gotten a copy of the decrypted data we can free b_rabd to
+                * save some space.
+                */
+               if (hdr->b_crypt_hdr.b_ebufcnt == 0 && HDR_HAS_RABD(hdr) &&
+                   hdr->b_l1hdr.b_pabd != NULL)
+                       arc_hdr_free_abd(hdr, B_TRUE);
        }
-       hash_lock = HDR_LOCK(buf->b_hdr);
-       mutex_enter(hash_lock);
-       hdr = buf->b_hdr;
-       ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
-       mutex_exit(&buf->b_evict_lock);
 
-       ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
-           hdr->b_l1hdr.b_state == arc_mfu);
+       arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
 
-       add_reference(hdr, hash_lock, tag);
-       DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
-       arc_access(hdr, hash_lock);
-       mutex_exit(hash_lock);
-       ARCSTAT_BUMP(arcstat_hits);
-       ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
-           demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
-           data, metadata, hits);
+       if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
+               /*
+                * If the current arc_buf_t is sharing its data buffer with the
+                * hdr, then reassign the hdr's b_pabd to share it with the new
+                * buffer at the end of the list. The shared buffer is always
+                * the last one on the hdr's buffer list.
+                *
+                * There is an equivalent case for compressed bufs, but since
+                * they aren't guaranteed to be the last buf in the list and
+                * that is an exceedingly rare case, we just allow that space to be
+                * wasted temporarily. We must also be careful not to share
+                * encrypted buffers, since they cannot be shared.
+                */
+               if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
+                       /* Only one buf can be shared at once */
+                       VERIFY(!arc_buf_is_shared(lastbuf));
+                       /* hdr is uncompressed so can't have compressed buf */
+                       VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
+
+                       ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+                       arc_hdr_free_abd(hdr, B_FALSE);
+
+                       /*
+                        * We must setup a new shared block between the
+                        * last buffer and the hdr. The data would have
+                        * been allocated by the arc buf so we need to transfer
+                        * ownership to the hdr since it's now being shared.
+                        */
+                       arc_share_buf(hdr, lastbuf);
+               }
+       } else if (HDR_SHARED_DATA(hdr)) {
+               /*
+                * Uncompressed shared buffers are always at the end
+                * of the list. Compressed buffers don't have the
+                * same requirements. This makes it hard to
+                * simply assert that the lastbuf is shared so
+                * we rely on the hdr's compression flags to determine
+                * if we have a compressed, shared buffer.
+                */
+               ASSERT3P(lastbuf, !=, NULL);
+               ASSERT(arc_buf_is_shared(lastbuf) ||
+                   arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
+       }
+
+       /*
+        * Free the checksum if we're removing the last uncompressed buf from
+        * this hdr.
+        */
+       if (!arc_hdr_has_uncompressed_buf(hdr)) {
+               arc_cksum_free(hdr);
+       }
+
+       /* clean up the buf */
+       buf->b_hdr = NULL;
+       kmem_cache_free(buf_cache, buf);
 }
 
 static void
-arc_buf_free_on_write(void *data, size_t size,
-    void (*free_func)(void *, size_t))
+arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata)
 {
-       l2arc_data_free_t *df;
+       uint64_t size;
 
-       df = kmem_alloc(sizeof (*df), KM_SLEEP);
-       df->l2df_data = data;
-       df->l2df_size = size;
-       df->l2df_func = free_func;
-       mutex_enter(&l2arc_free_on_write_mtx);
-       list_insert_head(l2arc_free_on_write, df);
-       mutex_exit(&l2arc_free_on_write_mtx);
+       ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
+       IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
+
+       if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
+               hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
+
+       if (alloc_rdata) {
+               size = HDR_GET_PSIZE(hdr);
+               ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
+               hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr);
+               ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
+               ARCSTAT_INCR(arcstat_raw_size, size);
+       } else {
+               size = arc_hdr_size(hdr);
+               ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+               hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr);
+               ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+       }
+
+       ARCSTAT_INCR(arcstat_compressed_size, size);
+       ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
 }
 
-/*
- * Free the arc data buffer.  If it is an l2arc write in progress,
- * the buffer is placed on l2arc_free_on_write to be freed later.
- */
 static void
-arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
+arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
 {
-       arc_buf_hdr_t *hdr = buf->b_hdr;
+       uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
+       IMPLY(free_rdata, HDR_HAS_RABD(hdr));
 
+       /*
+        * If the hdr is currently being written to the l2arc then
+        * we defer freeing the data by adding it to the l2arc_free_on_write
+        * list. The l2arc will free the data once it's finished
+        * writing it to the l2arc device.
+        */
        if (HDR_L2_WRITING(hdr)) {
-               arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func);
+               arc_hdr_free_on_write(hdr, free_rdata);
                ARCSTAT_BUMP(arcstat_l2_free_on_write);
+       } else if (free_rdata) {
+               arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
+       } else {
+               arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
+       }
+
+       if (free_rdata) {
+               hdr->b_crypt_hdr.b_rabd = NULL;
+               ARCSTAT_INCR(arcstat_raw_size, -size);
        } else {
-               free_func(buf->b_data, hdr->b_size);
+               hdr->b_l1hdr.b_pabd = NULL;
        }
+
+       if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
+               hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
+
+       ARCSTAT_INCR(arcstat_compressed_size, -size);
+       ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
 }
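
The free-on-write path above defers the actual free until the in-flight L2ARC write completes. Here is a simplified userland sketch of that pattern, using a pthread mutex and a plain malloc'd list rather than the kernel's list/abd primitives; all names are hypothetical and it is illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct deferred_free {
	struct deferred_free *next;
	void *data;
	size_t size;
} deferred_free_t;

static pthread_mutex_t df_lock = PTHREAD_MUTEX_INITIALIZER;
static deferred_free_t *df_head;

/* Queue a free instead of performing it immediately. */
static void
defer_free(void *data, size_t size)
{
	deferred_free_t *df = malloc(sizeof (*df));
	df->data = data;
	df->size = size;
	pthread_mutex_lock(&df_lock);
	df->next = df_head;
	df_head = df;
	pthread_mutex_unlock(&df_lock);
}

/* Called once the in-flight write has completed. */
static void
run_deferred_frees(void)
{
	pthread_mutex_lock(&df_lock);
	deferred_free_t *df = df_head;
	df_head = NULL;
	pthread_mutex_unlock(&df_lock);

	while (df != NULL) {
		deferred_free_t *next = df->next;
		printf("freeing %zu deferred bytes\n", df->size);
		free(df->data);
		free(df);
		df = next;
	}
}

int
main(void)
{
	defer_free(malloc(4096), 4096);	/* pretend a write is in flight */
	run_deferred_frees();		/* write completed; drain the list */
	return (0);
}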
 
-static void
-arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
+static arc_buf_hdr_t *
+arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
+    boolean_t protected, enum zio_compress compression_type,
+    arc_buf_contents_t type, boolean_t alloc_rdata)
 {
-       ASSERT(HDR_HAS_L2HDR(hdr));
-       ASSERT(MUTEX_HELD(&hdr->b_l2hdr.b_dev->l2ad_mtx));
+       arc_buf_hdr_t *hdr;
+
+       VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
+       if (protected) {
+               hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
+       } else {
+               hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
+       }
+
+       ASSERT(HDR_EMPTY(hdr));
+       ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+       HDR_SET_PSIZE(hdr, psize);
+       HDR_SET_LSIZE(hdr, lsize);
+       hdr->b_spa = spa;
+       hdr->b_type = type;
+       hdr->b_flags = 0;
+       arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
+       arc_hdr_set_compress(hdr, compression_type);
+       if (protected)
+               arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
+
+       hdr->b_l1hdr.b_state = arc_anon;
+       hdr->b_l1hdr.b_arc_access = 0;
+       hdr->b_l1hdr.b_bufcnt = 0;
+       hdr->b_l1hdr.b_buf = NULL;
 
        /*
-        * The b_tmp_cdata field is linked off of the b_l1hdr, so if
-        * that doesn't exist, the header is in the arc_l2c_only state,
-        * and there isn't anything to free (it's already been freed).
+        * Allocate the hdr's buffer. This will contain either
+        * the compressed or uncompressed data depending on the block
+        * it references and whether compressed ARC is enabled.
         */
-       if (!HDR_HAS_L1HDR(hdr))
-               return;
+       arc_hdr_alloc_abd(hdr, alloc_rdata);
+       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+
+       return (hdr);
+}
+
+/*
+ * Transition between the two allocation states for the arc_buf_hdr struct.
+ * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
+ * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
+ * version is used when a cache buffer is only in the L2ARC in order to reduce
+ * memory usage.
+ */
+static arc_buf_hdr_t *
+arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
+{
+       arc_buf_hdr_t *nhdr;
+       l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
+
+       ASSERT(HDR_HAS_L2HDR(hdr));
+       ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
+           (old == hdr_l2only_cache && new == hdr_full_cache));
 
        /*
-        * The header isn't being written to the l2arc device, thus it
-        * shouldn't have a b_tmp_cdata to free.
+        * if the caller wanted a new full header and the header is to be
+        * encrypted we will actually allocate the header from the full crypt
+        * cache instead. The same applies to freeing from the old cache.
         */
-       if (!HDR_L2_WRITING(hdr)) {
-               ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
-               return;
+       if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
+               new = hdr_full_crypt_cache;
+       if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
+               old = hdr_full_crypt_cache;
+
+       nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
+
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
+       buf_hash_remove(hdr);
+
+       bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
+
+       if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
+               arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
+               /*
+                * arc_access and arc_change_state need to be aware that a
+                * header has just come out of L2ARC, so we set its state to
+                * l2c_only even though it's about to change.
+                */
+               nhdr->b_l1hdr.b_state = arc_l2c_only;
+
+               /* Verify previous threads set these to NULL before freeing */
+               ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
+               ASSERT(!HDR_HAS_RABD(hdr));
+       } else {
+               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+               ASSERT0(hdr->b_l1hdr.b_bufcnt);
+               ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+
+               /*
+                * If we've reached here, we must have been called from
+                * arc_evict_hdr(), as such we should have already been
+                * removed from any ghost list we were previously on
+                * (which protects us from racing with arc_evict_state),
+                * thus no locking is needed during this check.
+                */
+               ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
+
+               /*
+                * A buffer must not be moved into the arc_l2c_only
+                * state if it's not finished being written out to the
+                * l2arc device. Otherwise, the b_l1hdr.b_pabd field
+                * might try to be accessed, even though it was removed.
+                */
+               VERIFY(!HDR_L2_WRITING(hdr));
+               VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+               ASSERT(!HDR_HAS_RABD(hdr));
+
+               arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
        }
+       /*
+        * The header has been reallocated so we need to re-insert it into any
+        * lists it was on.
+        */
+       (void) buf_hash_insert(nhdr, NULL);
+
+       ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
+
+       mutex_enter(&dev->l2ad_mtx);
+
+       /*
+        * We must place the realloc'ed header back into the list at
+        * the same spot. Otherwise, if it's placed earlier in the list,
+        * l2arc_write_buffers() could find it during the function's
+        * write phase, and try to write it out to the l2arc.
+        */
+       list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
+       list_remove(&dev->l2ad_buflist, hdr);
+
+       mutex_exit(&dev->l2ad_mtx);
+
+       /*
+        * Since we're using the pointer address as the tag when
+        * incrementing and decrementing the l2ad_alloc refcount, we
+        * must remove the old pointer (that we're about to destroy) and
+        * add the new pointer to the refcount. Otherwise we'd remove
+        * the wrong pointer address when calling arc_hdr_destroy() later.
+        */
+
+       (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
+       (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
+
+       buf_discard_identity(hdr);
+       kmem_cache_free(old, hdr);
+
+       return (nhdr);
+}
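
The insert-after-then-remove sequence above keeps the realloc'ed header at the old header's exact position in l2ad_buflist, so a concurrent walker never sees the entry move earlier or later in the list. A minimal doubly linked list sketch of why that works (toy types, not the kernel list_t API):

#include <assert.h>
#include <stddef.h>

typedef struct dnode {
	struct dnode *prev, *next;
} dnode_t;

static void
dlist_insert_after(dnode_t *pos, dnode_t *node)
{
	node->prev = pos;
	node->next = pos->next;
	if (pos->next != NULL)
		pos->next->prev = node;
	pos->next = node;
}

static void
dlist_remove(dnode_t *node)
{
	if (node->prev != NULL)
		node->prev->next = node->next;
	if (node->next != NULL)
		node->next->prev = node->prev;
	node->prev = node->next = NULL;
}

/* Swap 'repl' into the exact position 'old' occupied. */
static void
dlist_replace_in_place(dnode_t *old, dnode_t *repl)
{
	dlist_insert_after(old, repl);
	dlist_remove(old);
}

int
main(void)
{
	dnode_t a = { NULL, NULL }, b = { NULL, NULL }, c = { NULL, NULL };
	dnode_t repl = { NULL, NULL };

	/* build a <-> b <-> c by hand */
	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

	dlist_replace_in_place(&b, &repl);
	assert(a.next == &repl && repl.prev == &a);
	assert(repl.next == &c && c.prev == &repl);
	return (0);
}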
+
+/*
+ * This function allows an L1 header to be reallocated as a crypt
+ * header and vice versa. If we are going to a crypt header, the
+ * new fields will be zeroed out.
+ */
+static arc_buf_hdr_t *
+arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
+{
+       arc_buf_hdr_t *nhdr;
+       arc_buf_t *buf;
+       kmem_cache_t *ncache, *ocache;
+
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
+       ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
+       ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 
-       /*
-        * The header does not have compression enabled. This can be due
-        * to the buffer not being compressible, or because we're
-        * freeing the buffer before the second phase of
-        * l2arc_write_buffer() has started (which does the compression
-        * step). In either case, b_tmp_cdata does not point to a
-        * separately compressed buffer, so there's nothing to free (it
-        * points to the same buffer as the arc_buf_t's b_data field).
-        */
-       if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) {
-               hdr->b_l1hdr.b_tmp_cdata = NULL;
-               return;
+       if (need_crypt) {
+               ncache = hdr_full_crypt_cache;
+               ocache = hdr_full_cache;
+       } else {
+               ncache = hdr_full_cache;
+               ocache = hdr_full_crypt_cache;
        }
 
+       nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
+       bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
+       nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
+       nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
+       nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
+       nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
+       nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
+       nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
+       nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
+       nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
+       nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
+       nhdr->b_l1hdr.b_l2_hits = hdr->b_l1hdr.b_l2_hits;
+       nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
+       nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
+       nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
+
        /*
-        * There's nothing to free since the buffer was all zero's and
-        * compressed to a zero length buffer.
+        * This refcount_add() exists only to ensure that the individual
+        * arc buffers always point to a header that is referenced, avoiding
+        * a small race condition that could trigger ASSERTs.
         */
-       if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_EMPTY) {
-               ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
-               return;
+       (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
+
+       for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
+               mutex_enter(&buf->b_evict_lock);
+               buf->b_hdr = nhdr;
+               mutex_exit(&buf->b_evict_lock);
        }
 
-       ASSERT(L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr)));
+       refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
+       (void) refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
+
+       if (need_crypt) {
+               arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
+       } else {
+               arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
+       }
 
-       arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata,
-           hdr->b_size, zio_data_buf_free);
+       buf_discard_identity(hdr);
+       kmem_cache_free(ocache, hdr);
 
-       ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write);
-       hdr->b_l1hdr.b_tmp_cdata = NULL;
+       return (nhdr);
 }
 
 /*
- * Free up buf->b_data and if 'remove' is set, then pull the
- * arc_buf_t off of the the arc_buf_hdr_t's list and free it.
+ * This function is used by the send / receive code to convert a newly
+ * allocated arc_buf_t to one that is suitable for a raw encrypted write. It
+ * is also used to allow the root objset block to be updated without altering
+ * its embedded MACs. Both block types will always be uncompressed so we do not
+ * have to worry about compression type or psize.
  */
-static void
-arc_buf_destroy(arc_buf_t *buf, boolean_t remove)
+void
+arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
+    dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
+    const uint8_t *mac)
 {
-       arc_buf_t **bufp;
+       arc_buf_hdr_t *hdr = buf->b_hdr;
 
-       /* free up data associated with the buf */
-       if (buf->b_data != NULL) {
-               arc_state_t *state = buf->b_hdr->b_l1hdr.b_state;
-               uint64_t size = buf->b_hdr->b_size;
-               arc_buf_contents_t type = arc_buf_type(buf->b_hdr);
+       ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
+       ASSERT(HDR_HAS_L1HDR(hdr));
+       ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
+
+       buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
+       if (!HDR_PROTECTED(hdr))
+               hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
+       hdr->b_crypt_hdr.b_dsobj = dsobj;
+       hdr->b_crypt_hdr.b_ot = ot;
+       hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
+           DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
+       if (!arc_hdr_has_uncompressed_buf(hdr))
+               arc_cksum_free(hdr);
+
+       if (salt != NULL)
+               bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
+       if (iv != NULL)
+               bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
+       if (mac != NULL)
+               bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
+}
 
-               arc_cksum_verify(buf);
-               arc_buf_unwatch(buf);
+/*
+ * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
+ * The buf is returned thawed since we expect the consumer to modify it.
+ */
+arc_buf_t *
+arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
+{
+       arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
+           B_FALSE, ZIO_COMPRESS_OFF, type, B_FALSE);
+       ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
 
-               if (type == ARC_BUFC_METADATA) {
-                       arc_buf_data_free(buf, zio_buf_free);
-                       arc_space_return(size, ARC_SPACE_META);
-               } else {
-                       ASSERT(type == ARC_BUFC_DATA);
-                       arc_buf_data_free(buf, zio_data_buf_free);
-                       arc_space_return(size, ARC_SPACE_DATA);
-               }
+       arc_buf_t *buf = NULL;
+       VERIFY0(arc_buf_alloc_impl(hdr, spa, 0, tag, B_FALSE, B_FALSE,
+           B_FALSE, B_FALSE, &buf));
+       arc_buf_thaw(buf);
+
+       return (buf);
+}
 
-               /* protected by hash lock, if in the hash table */
-               if (multilist_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) {
-                       uint64_t *cnt = &state->arcs_lsize[type];
+/*
+ * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
+ * for bufs containing metadata.
+ */
+arc_buf_t *
+arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
+    enum zio_compress compression_type)
+{
+       ASSERT3U(lsize, >, 0);
+       ASSERT3U(lsize, >=, psize);
+       ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
+       ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
 
-                       ASSERT(refcount_is_zero(
-                           &buf->b_hdr->b_l1hdr.b_refcnt));
-                       ASSERT(state != arc_anon && state != arc_l2c_only);
+       arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
+           B_FALSE, compression_type, ARC_BUFC_DATA, B_FALSE);
+       ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
 
-                       ASSERT3U(*cnt, >=, size);
-                       atomic_add_64(cnt, -size);
-               }
-               ASSERT3U(state->arcs_size, >=, size);
-               atomic_add_64(&state->arcs_size, -size);
-               buf->b_data = NULL;
+       arc_buf_t *buf = NULL;
+       VERIFY0(arc_buf_alloc_impl(hdr, spa, 0, tag, B_FALSE,
+           B_TRUE, B_FALSE, B_FALSE, &buf));
+       arc_buf_thaw(buf);
+       ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
 
+       if (!arc_buf_is_shared(buf)) {
                /*
-                * If we're destroying a duplicate buffer make sure
-                * that the appropriate statistics are updated.
+                * To ensure that the hdr has the correct data in it if we call
+                * arc_untransform() on this buf before it's been written to
+                * disk, it's easiest if we just set up sharing between the
+                * buf and the hdr.
                 */
-               if (buf->b_hdr->b_l1hdr.b_datacnt > 1 &&
-                   HDR_ISTYPE_DATA(buf->b_hdr)) {
-                       ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
-                       ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
-               }
-               ASSERT(buf->b_hdr->b_l1hdr.b_datacnt > 0);
-               buf->b_hdr->b_l1hdr.b_datacnt -= 1;
+               ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd));
+               arc_hdr_free_abd(hdr, B_FALSE);
+               arc_share_buf(hdr, buf);
        }
 
-       /* only remove the buf if requested */
-       if (!remove)
-               return;
+       return (buf);
+}
 
-       /* remove the buf from the hdr list */
-       for (bufp = &buf->b_hdr->b_l1hdr.b_buf; *bufp != buf;
-           bufp = &(*bufp)->b_next)
-               continue;
-       *bufp = buf->b_next;
-       buf->b_next = NULL;
+arc_buf_t *
+arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
+    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
+    dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
+    enum zio_compress compression_type)
+{
+       arc_buf_hdr_t *hdr;
+       arc_buf_t *buf;
+       arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
+           ARC_BUFC_METADATA : ARC_BUFC_DATA;
+
+       ASSERT3U(lsize, >, 0);
+       ASSERT3U(lsize, >=, psize);
+       ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
+       ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
+
+       hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
+           compression_type, type, B_TRUE);
+       ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
+
+       hdr->b_crypt_hdr.b_dsobj = dsobj;
+       hdr->b_crypt_hdr.b_ot = ot;
+       hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
+           DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
+       bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
+       bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
+       bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
 
-       ASSERT(buf->b_efunc == NULL);
+       /*
+        * This buffer will be considered encrypted even if the ot is not an
+        * encrypted type. It will become authenticated instead in
+        * arc_write_ready().
+        */
+       buf = NULL;
+       VERIFY0(arc_buf_alloc_impl(hdr, spa, dsobj, tag, B_TRUE, B_TRUE,
+           B_FALSE, B_FALSE, &buf));
+       arc_buf_thaw(buf);
+       ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
 
-       /* clean up the buf */
-       buf->b_hdr = NULL;
-       kmem_cache_free(buf_cache, buf);
+       return (buf);
+}
+
+static void
+arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
+{
+       l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
+       l2arc_dev_t *dev = l2hdr->b_dev;
+       uint64_t psize = arc_hdr_size(hdr);
+
+       ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
+       ASSERT(HDR_HAS_L2HDR(hdr));
+
+       list_remove(&dev->l2ad_buflist, hdr);
+
+       ARCSTAT_INCR(arcstat_l2_psize, -psize);
+       ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
+
+       vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
+
+       (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
+       arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
 }
 
 static void
@@ -1841,67 +3673,51 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
 {
        if (HDR_HAS_L1HDR(hdr)) {
                ASSERT(hdr->b_l1hdr.b_buf == NULL ||
-                   hdr->b_l1hdr.b_datacnt > 0);
+                   hdr->b_l1hdr.b_bufcnt > 0);
                ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
        }
        ASSERT(!HDR_IO_IN_PROGRESS(hdr));
        ASSERT(!HDR_IN_HASH_TABLE(hdr));
 
-       if (HDR_HAS_L2HDR(hdr)) {
-               l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
-               boolean_t buflist_held = MUTEX_HELD(&l2hdr->b_dev->l2ad_mtx);
+       if (!HDR_EMPTY(hdr))
+               buf_discard_identity(hdr);
 
-               if (!buflist_held) {
-                       mutex_enter(&l2hdr->b_dev->l2ad_mtx);
-                       l2hdr = &hdr->b_l2hdr;
-               }
+       if (HDR_HAS_L2HDR(hdr)) {
+               l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
+               boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
 
-               list_remove(&l2hdr->b_dev->l2ad_buflist, hdr);
+               if (!buflist_held)
+                       mutex_enter(&dev->l2ad_mtx);
 
                /*
-                * We don't want to leak the b_tmp_cdata buffer that was
-                * allocated in l2arc_write_buffers()
+                * Even though we checked this conditional above, we
+                * need to check this again now that we have the
+                * l2ad_mtx. This is because we could be racing with
+                * another thread calling l2arc_evict() which might have
+                * destroyed this header's L2 portion as we were waiting
+                * to acquire the l2ad_mtx. If that happens, we don't
+                * want to re-destroy the header's L2 portion.
                 */
-               arc_buf_l2_cdata_free(hdr);
-
-               arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
-               ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
-               ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
+               if (HDR_HAS_L2HDR(hdr))
+                       arc_hdr_l2hdr_destroy(hdr);
 
                if (!buflist_held)
-                       mutex_exit(&l2hdr->b_dev->l2ad_mtx);
-
-               hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
+                       mutex_exit(&dev->l2ad_mtx);
        }
 
-       if (!BUF_EMPTY(hdr))
-               buf_discard_identity(hdr);
+       if (HDR_HAS_L1HDR(hdr)) {
+               arc_cksum_free(hdr);
 
-       if (hdr->b_freeze_cksum != NULL) {
-               kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
-               hdr->b_freeze_cksum = NULL;
-       }
+               while (hdr->b_l1hdr.b_buf != NULL)
+                       arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
 
-       if (HDR_HAS_L1HDR(hdr)) {
-               while (hdr->b_l1hdr.b_buf) {
-                       arc_buf_t *buf = hdr->b_l1hdr.b_buf;
-
-                       if (buf->b_efunc != NULL) {
-                               mutex_enter(&arc_user_evicts_lock);
-                               mutex_enter(&buf->b_evict_lock);
-                               ASSERT(buf->b_hdr != NULL);
-                               arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE);
-                               hdr->b_l1hdr.b_buf = buf->b_next;
-                               buf->b_hdr = &arc_eviction_hdr;
-                               buf->b_next = arc_eviction_list;
-                               arc_eviction_list = buf;
-                               mutex_exit(&buf->b_evict_lock);
-                               cv_signal(&arc_user_evicts_cv);
-                               mutex_exit(&arc_user_evicts_lock);
-                       } else {
-                               arc_buf_destroy(hdr->b_l1hdr.b_buf, TRUE);
-                       }
+               if (hdr->b_l1hdr.b_pabd != NULL) {
+                       arc_hdr_free_abd(hdr, B_FALSE);
+               }
+
+               if (HDR_HAS_RABD(hdr)) {
+                       arc_hdr_free_abd(hdr, B_TRUE);
                }
        }
 
@@ -1909,141 +3725,41 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
        if (HDR_HAS_L1HDR(hdr)) {
                ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
                ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
-               kmem_cache_free(hdr_full_cache, hdr);
-       } else {
-               kmem_cache_free(hdr_l2only_cache, hdr);
-       }
-}
-
-void
-arc_buf_free(arc_buf_t *buf, void *tag)
-{
-       arc_buf_hdr_t *hdr = buf->b_hdr;
-       int hashed = hdr->b_l1hdr.b_state != arc_anon;
-
-       ASSERT(buf->b_efunc == NULL);
-       ASSERT(buf->b_data != NULL);
-
-       if (hashed) {
-               kmutex_t *hash_lock = HDR_LOCK(hdr);
-
-               mutex_enter(hash_lock);
-               hdr = buf->b_hdr;
-               ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
 
-               (void) remove_reference(hdr, hash_lock, tag);
-               if (hdr->b_l1hdr.b_datacnt > 1) {
-                       arc_buf_destroy(buf, TRUE);
+               if (!HDR_PROTECTED(hdr)) {
+                       kmem_cache_free(hdr_full_cache, hdr);
                } else {
-                       ASSERT(buf == hdr->b_l1hdr.b_buf);
-                       ASSERT(buf->b_efunc == NULL);
-                       hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
+                       kmem_cache_free(hdr_full_crypt_cache, hdr);
                }
-               mutex_exit(hash_lock);
-       } else if (HDR_IO_IN_PROGRESS(hdr)) {
-               int destroy_hdr;
-               /*
-                * We are in the middle of an async write.  Don't destroy
-                * this buffer unless the write completes before we finish
-                * decrementing the reference count.
-                */
-               mutex_enter(&arc_user_evicts_lock);
-               (void) remove_reference(hdr, NULL, tag);
-               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-               destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
-               mutex_exit(&arc_user_evicts_lock);
-               if (destroy_hdr)
-                       arc_hdr_destroy(hdr);
        } else {
-               if (remove_reference(hdr, NULL, tag) > 0)
-                       arc_buf_destroy(buf, TRUE);
-               else
-                       arc_hdr_destroy(hdr);
+               kmem_cache_free(hdr_l2only_cache, hdr);
        }
 }
 
-boolean_t
-arc_buf_remove_ref(arc_buf_t *buf, void* tag)
+void
+arc_buf_destroy(arc_buf_t *buf, void* tag)
 {
        arc_buf_hdr_t *hdr = buf->b_hdr;
-       kmutex_t *hash_lock = NULL;
-       boolean_t no_callback = (buf->b_efunc == NULL);
+       kmutex_t *hash_lock = HDR_LOCK(hdr);
 
        if (hdr->b_l1hdr.b_state == arc_anon) {
-               ASSERT(hdr->b_l1hdr.b_datacnt == 1);
-               arc_buf_free(buf, tag);
-               return (no_callback);
+               ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
+               ASSERT(!HDR_IO_IN_PROGRESS(hdr));
+               VERIFY0(remove_reference(hdr, NULL, tag));
+               arc_hdr_destroy(hdr);
+               return;
        }
 
-       hash_lock = HDR_LOCK(hdr);
        mutex_enter(hash_lock);
-       hdr = buf->b_hdr;
-       ASSERT(hdr->b_l1hdr.b_datacnt > 0);
+       ASSERT3P(hdr, ==, buf->b_hdr);
+       ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
        ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
-       ASSERT(hdr->b_l1hdr.b_state != arc_anon);
-       ASSERT(buf->b_data != NULL);
+       ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
+       ASSERT3P(buf->b_data, !=, NULL);
 
        (void) remove_reference(hdr, hash_lock, tag);
-       if (hdr->b_l1hdr.b_datacnt > 1) {
-               if (no_callback)
-                       arc_buf_destroy(buf, TRUE);
-       } else if (no_callback) {
-               ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL);
-               ASSERT(buf->b_efunc == NULL);
-               hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
-       }
-       ASSERT(no_callback || hdr->b_l1hdr.b_datacnt > 1 ||
-           refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+       arc_buf_destroy_impl(buf);
        mutex_exit(hash_lock);
-       return (no_callback);
-}
-
-uint64_t
-arc_buf_size(arc_buf_t *buf)
-{
-       return (buf->b_hdr->b_size);
-}
-
-/*
- * Called from the DMU to determine if the current buffer should be
- * evicted. In order to ensure proper locking, the eviction must be initiated
- * from the DMU. Return true if the buffer is associated with user data and
- * duplicate buffers still exist.
- */
-boolean_t
-arc_buf_eviction_needed(arc_buf_t *buf)
-{
-       arc_buf_hdr_t *hdr;
-       boolean_t evict_needed = B_FALSE;
-
-       if (zfs_disable_dup_eviction)
-               return (B_FALSE);
-
-       mutex_enter(&buf->b_evict_lock);
-       hdr = buf->b_hdr;
-       if (hdr == NULL) {
-               /*
-                * We are in arc_do_user_evicts(); let that function
-                * perform the eviction.
-                */
-               ASSERT(buf->b_data == NULL);
-               mutex_exit(&buf->b_evict_lock);
-               return (B_FALSE);
-       } else if (buf->b_data == NULL) {
-               /*
-                * We have already been added to the arc eviction list;
-                * recommend eviction.
-                */
-               ASSERT3P(hdr, ==, &arc_eviction_hdr);
-               mutex_exit(&buf->b_evict_lock);
-               return (B_TRUE);
-       }
-
-       if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr))
-               evict_needed = B_TRUE;
-
-       mutex_exit(&buf->b_evict_lock);
-       return (evict_needed);
 }
 
 /*
@@ -2070,11 +3786,11 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
        state = hdr->b_l1hdr.b_state;
        if (GHOST_STATE(state)) {
                ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-               ASSERT(hdr->b_l1hdr.b_buf == NULL);
+               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 
                /*
                 * l2arc_write_buffers() relies on a header's L1 portion
-                * (i.e. its b_tmp_cdata field) during its write phase.
+                * (i.e. its b_pabd field) during its write phase.
                 * Thus, we cannot push a header onto the arc_l2c_only
                 * state (removing its L1 piece) until the header is
                 * done being written to the l2arc.
@@ -2085,11 +3801,13 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                }
 
                ARCSTAT_BUMP(arcstat_deleted);
-               bytes_evicted += hdr->b_size;
+               bytes_evicted += HDR_GET_LSIZE(hdr);
 
                DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
 
                if (HDR_HAS_L2HDR(hdr)) {
+                       ASSERT(hdr->b_l1hdr.b_pabd == NULL);
+                       ASSERT(!HDR_HAS_RABD(hdr));
                        /*
                         * This buffer is cached on the 2nd Level ARC;
                         * don't destroy the header.
@@ -2121,7 +3839,6 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
        }
 
        ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
-       ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0);
        while (hdr->b_l1hdr.b_buf) {
                arc_buf_t *buf = hdr->b_l1hdr.b_buf;
                if (!mutex_tryenter(&buf->b_evict_lock)) {
@@ -2129,37 +3846,43 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                        break;
                }
                if (buf->b_data != NULL)
-                       bytes_evicted += hdr->b_size;
-               if (buf->b_efunc != NULL) {
-                       mutex_enter(&arc_user_evicts_lock);
-                       arc_buf_destroy(buf, FALSE);
-                       hdr->b_l1hdr.b_buf = buf->b_next;
-                       buf->b_hdr = &arc_eviction_hdr;
-                       buf->b_next = arc_eviction_list;
-                       arc_eviction_list = buf;
-                       cv_signal(&arc_user_evicts_cv);
-                       mutex_exit(&arc_user_evicts_lock);
-                       mutex_exit(&buf->b_evict_lock);
-               } else {
-                       mutex_exit(&buf->b_evict_lock);
-                       arc_buf_destroy(buf, TRUE);
-               }
+                       bytes_evicted += HDR_GET_LSIZE(hdr);
+               mutex_exit(&buf->b_evict_lock);
+               arc_buf_destroy_impl(buf);
        }
 
        if (HDR_HAS_L2HDR(hdr)) {
-               ARCSTAT_INCR(arcstat_evict_l2_cached, hdr->b_size);
+               ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
        } else {
-               if (l2arc_write_eligible(hdr->b_spa, hdr))
-                       ARCSTAT_INCR(arcstat_evict_l2_eligible, hdr->b_size);
-               else
-                       ARCSTAT_INCR(arcstat_evict_l2_ineligible, hdr->b_size);
+               if (l2arc_write_eligible(hdr->b_spa, hdr)) {
+                       ARCSTAT_INCR(arcstat_evict_l2_eligible,
+                           HDR_GET_LSIZE(hdr));
+               } else {
+                       ARCSTAT_INCR(arcstat_evict_l2_ineligible,
+                           HDR_GET_LSIZE(hdr));
+               }
        }
 
-       if (hdr->b_l1hdr.b_datacnt == 0) {
+       if (hdr->b_l1hdr.b_bufcnt == 0) {
+               arc_cksum_free(hdr);
+
+               bytes_evicted += arc_hdr_size(hdr);
+
+               /*
+                * If this hdr is being evicted and has a compressed
+                * buffer then we discard it here before we change states.
+                * This ensures that the accounting is updated correctly
+                * in arc_free_data_impl().
+                */
+               if (hdr->b_l1hdr.b_pabd != NULL)
+                       arc_hdr_free_abd(hdr, B_FALSE);
+
+               if (HDR_HAS_RABD(hdr))
+                       arc_hdr_free_abd(hdr, B_TRUE);
+
                arc_change_state(evicted_state, hdr, hash_lock);
                ASSERT(HDR_IN_HASH_TABLE(hdr));
-               hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
-               hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
+               arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
                DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
        }
 
@@ -2177,7 +3900,7 @@ arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
        int evict_count = 0;
 
        ASSERT3P(marker, !=, NULL);
-       ASSERTV(if (bytes < 0) ASSERT(bytes == ARC_EVICT_ALL));
+       IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
 
        mls = multilist_sublist_lock(ml, idx);
 
@@ -2253,7 +3976,7 @@ arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
                         * thread. If we used cv_broadcast, we could
                         * wake up "too many" threads causing arc_size
                         * to significantly overflow arc_c; since
-                        * arc_get_data_buf() doesn't check for overflow
+                        * arc_get_data_impl() doesn't check for overflow
                         * when it's woken up (it doesn't because it's
                         * possible for the ARC to be overflowing while
                         * full of un-evictable buffers, and the
@@ -2295,12 +4018,12 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
     arc_buf_contents_t type)
 {
        uint64_t total_evicted = 0;
-       multilist_t *ml = &state->arcs_list[type];
+       multilist_t *ml = state->arcs_list[type];
        int num_sublists;
        arc_buf_hdr_t **markers;
        int i;
 
-       ASSERTV(if (bytes < 0) ASSERT(bytes == ARC_EVICT_ALL));
+       IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
 
        num_sublists = multilist_get_num_sublists(ml);
 
@@ -2334,6 +4057,18 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
         * we're evicting all available buffers.
         */
        while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
+               int sublist_idx = multilist_get_random_index(ml);
+               uint64_t scan_evicted = 0;
+
+               /*
+                * Try to reduce pinned dnodes with a floor of arc_dnode_limit.
+                * Request that 10% of the LRUs be scanned by the superblock
+                * shrinker.
+                */
+               if (type == ARC_BUFC_DATA && arc_dnode_size > arc_dnode_limit)
+                       arc_prune_async((arc_dnode_size - arc_dnode_limit) /
+                           sizeof (dnode_t) / zfs_arc_dnode_reduce_percent);
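                /*
                 * Worked example of the request size above (illustrative
                 * numbers only; the true sizeof (dnode_t) and the
                 * zfs_arc_dnode_reduce_percent tunable may differ): with
                 * dnodes 2 GiB over arc_dnode_limit, a 1 KiB dnode_t and a
                 * reduce percent of 10, the prune callbacks are asked to
                 * scan roughly (2 GiB / 1 KiB) / 10, i.e. about 200,000
                 * dnodes.
                 */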
+
                /*
                 * Start eviction using a randomly selected sublist,
                 * this is to try and evenly balance eviction across all
@@ -2341,9 +4076,6 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
                 * (e.g. index 0) would cause evictions to favor certain
                 * sublists over others.
                 */
-               int sublist_idx = multilist_get_random_index(ml);
-               uint64_t scan_evicted = 0;
-
                for (i = 0; i < num_sublists; i++) {
                        uint64_t bytes_remaining;
                        uint64_t bytes_evicted;
@@ -2406,12 +4138,12 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
  * Flush all "evictable" data of the given type from the arc state
  * specified. This will not evict any "active" buffers (i.e. referenced).
  *
- * When 'retry' is set to FALSE, the function will make a single pass
+ * When 'retry' is set to B_FALSE, the function will make a single pass
  * over the state and evict any buffers that it can. Since it doesn't
  * continually retry the eviction, it might end up leaving some buffers
  * in the ARC due to lock misses.
  *
- * When 'retry' is set to TRUE, the function will continually retry the
+ * When 'retry' is set to B_TRUE, the function will continually retry the
  * eviction until *all* evictable buffers have been removed from the
  * state. As a result, if concurrent insertions into the state are
  * allowed (e.g. if the ARC isn't shutting down), this function might
@@ -2423,7 +4155,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
 {
        uint64_t evicted = 0;
 
-       while (state->arcs_lsize[type] != 0) {
+       while (refcount_count(&state->arcs_esize[type]) != 0) {
                evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
 
                if (!retry)
@@ -2434,8 +4166,8 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
 }
 
 /*
- * Helper function for arc_prune() it is responsible for safely handling
- * the execution of a registered arc_prune_func_t.
+ * Helper function for arc_prune_async(); it is responsible for safely
+ * handling the execution of a registered arc_prune_func_t.
  */
 static void
 arc_prune_task(void *ptr)
@@ -2446,12 +4178,7 @@ arc_prune_task(void *ptr)
        if (func != NULL)
                func(ap->p_adjust, ap->p_private);
 
-       /* Callback unregistered concurrently with execution */
-       if (refcount_remove(&ap->p_refcnt, func) == 0) {
-               ASSERT(!list_link_active(&ap->p_node));
-               refcount_destroy(&ap->p_refcnt);
-               kmem_free(ap, sizeof (*ap));
-       }
+       refcount_remove(&ap->p_refcnt, func);
 }
 
 /*
@@ -2460,8 +4187,8 @@ arc_prune_task(void *ptr)
  * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers.  This
  * is analogous to dnlc_reduce_cache() but more generic.
  *
- * This operation is performed asyncronously so it may be safely called
- * in the context of the arc_adapt_thread().  A reference is taken here
+ * This operation is performed asynchronously so it may be safely called
+ * in the context of the arc_reclaim_thread().  A reference is taken here
  * for each registered arc_prune_t and the arc_prune_task() is responsible
  * for releasing it once the registered arc_prune_func_t has completed.
  */
@@ -2479,19 +4206,16 @@ arc_prune_async(int64_t adjust)
 
                refcount_add(&ap->p_refcnt, ap->p_pfunc);
                ap->p_adjust = adjust;
-               taskq_dispatch(arc_prune_taskq, arc_prune_task, ap, TQ_SLEEP);
+               if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
+                   ap, TQ_SLEEP) == TASKQID_INVALID) {
+                       refcount_remove(&ap->p_refcnt, ap->p_pfunc);
+                       continue;
+               }
                ARCSTAT_BUMP(arcstat_prune);
        }
        mutex_exit(&arc_prune_mtx);
 }
 
-static void
-arc_prune(int64_t adjust)
-{
-       arc_prune_async(adjust);
-       taskq_wait_outstanding(arc_prune_taskq, 0);
-}
-
 /*
  * Evict the specified number of bytes from the state specified,
  * restricting eviction to the spa and type given. This function
@@ -2506,8 +4230,8 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
 {
        int64_t delta;
 
-       if (bytes > 0 && state->arcs_lsize[type] > 0) {
-               delta = MIN(state->arcs_lsize[type], bytes);
+       if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
+               delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
                return (arc_evict_state(state, spa, delta, type));
        }
 
@@ -2534,10 +4258,10 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
 static uint64_t
 arc_adjust_meta_balanced(void)
 {
-       int64_t adjustmnt, delta, prune = 0;
+       int64_t delta, prune = 0, adjustmnt;
        uint64_t total_evicted = 0;
        arc_buf_contents_t type = ARC_BUFC_DATA;
-       unsigned long restarts = zfs_arc_meta_adjust_restarts;
+       int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
 
 restart:
        /*
@@ -2550,8 +4274,9 @@ restart:
         */
        adjustmnt = arc_meta_used - arc_meta_limit;
 
-       if (adjustmnt > 0 && arc_mru->arcs_lsize[type] > 0) {
-               delta = MIN(arc_mru->arcs_lsize[type], adjustmnt);
+       if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
+               delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
+                   adjustmnt);
                total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
                adjustmnt -= delta;
        }
@@ -2566,23 +4291,26 @@ restart:
         * simply decrement the amount of data evicted from the MRU.
         */
 
-       if (adjustmnt > 0 && arc_mfu->arcs_lsize[type] > 0) {
-               delta = MIN(arc_mfu->arcs_lsize[type], adjustmnt);
+       if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
+               delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
+                   adjustmnt);
                total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
        }
 
        adjustmnt = arc_meta_used - arc_meta_limit;
 
-       if (adjustmnt > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
+       if (adjustmnt > 0 &&
+           refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
                delta = MIN(adjustmnt,
-                   arc_mru_ghost->arcs_lsize[type]);
+                   refcount_count(&arc_mru_ghost->arcs_esize[type]));
                total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
                adjustmnt -= delta;
        }
 
-       if (adjustmnt > 0 && arc_mfu_ghost->arcs_lsize[type] > 0) {
+       if (adjustmnt > 0 &&
+           refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
                delta = MIN(adjustmnt,
-                   arc_mfu_ghost->arcs_lsize[type]);
+                   refcount_count(&arc_mfu_ghost->arcs_esize[type]));
                total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
        }
 
@@ -2631,17 +4359,18 @@ arc_adjust_meta_only(void)
         * evict some from the MRU here, and some from the MFU below.
         */
        target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
-           (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size - arc_p));
+           (int64_t)(refcount_count(&arc_anon->arcs_size) +
+           refcount_count(&arc_mru->arcs_size) - arc_p));
 
        total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
 
        /*
         * Similar to the above, we want to evict enough bytes to get us
         * below the meta limit, but not so much as to drop us below the
-        * space alloted to the MFU (which is defined as arc_c - arc_p).
+        * space allotted to the MFU (which is defined as arc_c - arc_p).
         */
        target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
-           (int64_t)(arc_mfu->arcs_size - (arc_c - arc_p)));
+           (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
 
        total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
 
@@ -2668,8 +4397,8 @@ arc_adjust_meta(void)
 static arc_buf_contents_t
 arc_adjust_type(arc_state_t *state)
 {
-       multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
-       multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
+       multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
+       multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
        int data_idx = multilist_get_random_index(data_ml);
        int meta_idx = multilist_get_random_index(meta_ml);
        multilist_sublist_t *data_mls;
@@ -2759,8 +4488,8 @@ arc_adjust(void)
         * arc_p here, and then evict more from the MFU below.
         */
        target = MIN((int64_t)(arc_size - arc_c),
-           (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
-           arc_p));
+           (int64_t)(refcount_count(&arc_anon->arcs_size) +
+           refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
 
        /*
         * If we're below arc_meta_min, always prefer to evict data.
@@ -2806,7 +4535,7 @@ arc_adjust(void)
         */
        target = arc_size - arc_c;
 
-       if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
+       if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
            arc_meta_used > arc_meta_min) {
                bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
                total_evicted += bytes;
@@ -2844,7 +4573,8 @@ arc_adjust(void)
         * cache. The following logic enforces these limits on the ghost
         * caches, and evicts from them as needed.
         */
-       target = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
+       target = refcount_count(&arc_mru->arcs_size) +
+           refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
 
        bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
        total_evicted += bytes;
@@ -2862,7 +4592,8 @@ arc_adjust(void)
         *      mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
         *                  mru ghost + mfu ghost <= arc_c
         */
-       target = arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
+       target = refcount_count(&arc_mru_ghost->arcs_size) +
+           refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
 
        bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
        total_evicted += bytes;
@@ -2875,36 +4606,13 @@ arc_adjust(void)
        return (total_evicted);
 }
 
-static void
-arc_do_user_evicts(void)
-{
-       mutex_enter(&arc_user_evicts_lock);
-       while (arc_eviction_list != NULL) {
-               arc_buf_t *buf = arc_eviction_list;
-               arc_eviction_list = buf->b_next;
-               mutex_enter(&buf->b_evict_lock);
-               buf->b_hdr = NULL;
-               mutex_exit(&buf->b_evict_lock);
-               mutex_exit(&arc_user_evicts_lock);
-
-               if (buf->b_efunc != NULL)
-                       VERIFY0(buf->b_efunc(buf->b_private));
-
-               buf->b_efunc = NULL;
-               buf->b_private = NULL;
-               kmem_cache_free(buf_cache, buf);
-               mutex_enter(&arc_user_evicts_lock);
-       }
-       mutex_exit(&arc_user_evicts_lock);
-}
-
 void
 arc_flush(spa_t *spa, boolean_t retry)
 {
        uint64_t guid = 0;
 
        /*
-        * If retry is TRUE, a spa must not be specified since we have
+        * If retry is B_TRUE, a spa must not be specified since we have
         * no good way to determine if all of a spa's buffers have been
         * evicted from an arc state.
         */
@@ -2924,68 +4632,270 @@ arc_flush(spa_t *spa, boolean_t retry)
 
        (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
        (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
-
-       arc_do_user_evicts();
-       ASSERT(spa || arc_eviction_list == NULL);
 }
 
 void
-arc_shrink(uint64_t bytes)
+arc_shrink(int64_t to_free)
 {
-       if (arc_c > arc_c_min) {
-               uint64_t to_free;
+       uint64_t c = arc_c;
+
+       if (c > to_free && c - to_free > arc_c_min) {
+               arc_c = c - to_free;
+               atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
+               if (arc_c > arc_size)
+                       arc_c = MAX(arc_size, arc_c_min);
+               if (arc_p > arc_c)
+                       arc_p = (arc_c >> 1);
+               ASSERT(arc_c >= arc_c_min);
+               ASSERT((int64_t)arc_p >= 0);
+       } else {
+               arc_c = arc_c_min;
+       }
 
-               to_free = bytes ? bytes : arc_c >> zfs_arc_shrink_shift;
+       if (arc_size > arc_c)
+               (void) arc_adjust();
+}
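/*
 * A minimal stand-alone sketch (plain C, hypothetical example_* names, not
 * the kernel code) of the clamping performed above: the target is reduced
 * by to_free but never taken below the arc_c_min floor.
 */
#include <stdint.h>

static uint64_t
example_shrink_target(uint64_t c, uint64_t c_min, uint64_t to_free)
{
        if (c > to_free && c - to_free > c_min)
                return (c - to_free);           /* normal shrink */
        return (c_min);                         /* clamp to the floor */
}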
 
-               if (arc_c > arc_c_min + to_free)
-                       atomic_add_64(&arc_c, -to_free);
-               else
-                       arc_c = arc_c_min;
+/*
+ * Return maximum amount of memory that we could possibly use.  Reduced
+ * to half of all memory in user space which is primarily used for testing.
+ */
+static uint64_t
+arc_all_memory(void)
+{
+#ifdef _KERNEL
+#ifdef CONFIG_HIGHMEM
+       return (ptob(totalram_pages - totalhigh_pages));
+#else
+       return (ptob(totalram_pages));
+#endif /* CONFIG_HIGHMEM */
+#else
+       return (ptob(physmem) / 2);
+#endif /* _KERNEL */
+}
+
+/*
+ * Return the amount of memory that is considered free.  In user space,
+ * which is primarily used for testing, we pretend that free memory ranges
+ * from 0-20% of all memory.
+ */
+static uint64_t
+arc_free_memory(void)
+{
+#ifdef _KERNEL
+#ifdef CONFIG_HIGHMEM
+       struct sysinfo si;
+       si_meminfo(&si);
+       return (ptob(si.freeram - si.freehigh));
+#else
+#ifdef ZFS_GLOBAL_NODE_PAGE_STATE
+       return (ptob(nr_free_pages() +
+           global_node_page_state(NR_INACTIVE_FILE) +
+           global_node_page_state(NR_INACTIVE_ANON) +
+           global_node_page_state(NR_SLAB_RECLAIMABLE)));
+#else
+       return (ptob(nr_free_pages() +
+           global_page_state(NR_INACTIVE_FILE) +
+           global_page_state(NR_INACTIVE_ANON) +
+           global_page_state(NR_SLAB_RECLAIMABLE)));
+#endif /* ZFS_GLOBAL_NODE_PAGE_STATE */
+#endif /* CONFIG_HIGHMEM */
+#else
+       return (spa_get_random(arc_all_memory() * 20 / 100));
+#endif /* _KERNEL */
+}
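/*
 * Worked example of the user-space fallback above (illustrative numbers):
 * on a 16 GiB machine arc_all_memory() reports 8 GiB in user space, so
 * arc_free_memory() returns a pseudo-random value in [0, 1.6 GiB), i.e.
 * 0-20% of that total.
 */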
+
+typedef enum free_memory_reason_t {
+       FMR_UNKNOWN,
+       FMR_NEEDFREE,
+       FMR_LOTSFREE,
+       FMR_SWAPFS_MINFREE,
+       FMR_PAGES_PP_MAXIMUM,
+       FMR_HEAP_ARENA,
+       FMR_ZIO_ARENA,
+} free_memory_reason_t;
+
+int64_t last_free_memory;
+free_memory_reason_t last_free_reason;
+
+#ifdef _KERNEL
+/*
+ * Additional reserve of pages for pp_reserve.
+ */
+int64_t arc_pages_pp_reserve = 64;
+
+/*
+ * Additional reserve of pages for swapfs.
+ */
+int64_t arc_swapfs_reserve = 64;
+#endif /* _KERNEL */
+
+/*
+ * Return the amount of memory that can be consumed before reclaim will be
+ * needed.  Positive if there is sufficient free memory, negative indicates
+ * the amount of memory that needs to be freed up.
+ */
+static int64_t
+arc_available_memory(void)
+{
+       int64_t lowest = INT64_MAX;
+       free_memory_reason_t r = FMR_UNKNOWN;
+#ifdef _KERNEL
+       int64_t n;
+#ifdef __linux__
+#ifdef freemem
+#undef freemem
+#endif
+       pgcnt_t needfree = btop(arc_need_free);
+       pgcnt_t lotsfree = btop(arc_sys_free);
+       pgcnt_t desfree = 0;
+       pgcnt_t freemem = btop(arc_free_memory());
+#endif
+
+       if (needfree > 0) {
+               n = PAGESIZE * (-needfree);
+               if (n < lowest) {
+                       lowest = n;
+                       r = FMR_NEEDFREE;
+               }
+       }
+
+       /*
+        * check that we're out of range of the pageout scanner.  It starts to
+        * schedule paging if freemem is less than lotsfree and needfree.
+        * lotsfree is the high-water mark for pageout, and needfree is the
+        * number of needed free pages.  We add extra pages here to make sure
+        * the scanner doesn't start up while we're freeing memory.
+        */
+       n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
+       if (n < lowest) {
+               lowest = n;
+               r = FMR_LOTSFREE;
+       }
+
+#ifndef __linux__
+       /*
+        * check to make sure that swapfs has enough space so that anon
+        * reservations can still succeed. anon_resvmem() checks that the
+        * availrmem is greater than swapfs_minfree, and the number of reserved
+        * swap pages.  We also add a bit of extra here just to prevent
+        * circumstances from getting really dire.
+        */
+       n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
+           desfree - arc_swapfs_reserve);
+       if (n < lowest) {
+               lowest = n;
+               r = FMR_SWAPFS_MINFREE;
+       }
+
+       /*
+        * Check that we have enough availrmem that memory locking (e.g., via
+        * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
+        * stores the number of pages that cannot be locked; when availrmem
+        * drops below pages_pp_maximum, page locking mechanisms such as
+        * page_pp_lock() will fail.)
+        */
+       n = PAGESIZE * (availrmem - pages_pp_maximum -
+           arc_pages_pp_reserve);
+       if (n < lowest) {
+               lowest = n;
+               r = FMR_PAGES_PP_MAXIMUM;
+       }
+#endif
+
+#if defined(_ILP32)
+       /*
+        * If we're on a 32-bit platform, it's possible that we'll exhaust the
+        * kernel heap space before we ever run out of available physical
+        * memory.  Most checks of the size of the heap_area compare against
+        * tune.t_minarmem, which is the minimum available real memory that we
+        * can have in the system.  However, this is generally fixed at 25 pages
+        * which is so low that it's useless.  In this comparison, we seek to
+        * calculate the total heap-size, and reclaim if more than 3/4ths of the
+        * heap is allocated.  (Or, in the calculation, if less than 1/4th is
+        * free)
+        */
+       n = vmem_size(heap_arena, VMEM_FREE) -
+           (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
+       if (n < lowest) {
+               lowest = n;
+               r = FMR_HEAP_ARENA;
+       }
+#endif
 
-               to_free = bytes ? bytes : arc_p >> zfs_arc_shrink_shift;
+       /*
+        * If zio data pages are being allocated out of a separate heap segment,
+        * then enforce that the size of available vmem for this arena remains
+        * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
+        *
+        * Note that reducing the arc_zio_arena_free_shift keeps more virtual
+        * memory (in the zio_arena) free, which can avoid memory
+        * fragmentation issues.
+        */
+       if (zio_arena != NULL) {
+               n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
+                   (vmem_size(zio_arena, VMEM_ALLOC) >>
+                   arc_zio_arena_free_shift);
+               if (n < lowest) {
+                       lowest = n;
+                       r = FMR_ZIO_ARENA;
+               }
+       }
+#else /* _KERNEL */
+       /* Every 100 calls, free a small amount */
+       if (spa_get_random(100) == 0)
+               lowest = -1024;
+#endif /* _KERNEL */
 
-               if (arc_p > to_free)
-                       atomic_add_64(&arc_p, -to_free);
-               else
-                       arc_p = 0;
+       last_free_memory = lowest;
+       last_free_reason = r;
 
-               if (arc_c > arc_size)
-                       arc_c = MAX(arc_size, arc_c_min);
-               if (arc_p > arc_c)
-                       arc_p = (arc_c >> 1);
-               ASSERT(arc_c >= arc_c_min);
-               ASSERT((int64_t)arc_p >= 0);
-       }
+       return (lowest);
+}
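/*
 * Illustrative sketch only (plain C, hypothetical example_* name): the
 * function above reports headroom as a signed value, so a negative result
 * means "this many bytes must be freed".  The zio arena check keeps about
 * 1/(2^shift) of the allocated bytes free; e.g. with shift = 2, an arena
 * with 16 GiB allocated wants more than 4 GiB free.
 */
#include <stdint.h>

static int64_t
example_arena_headroom(uint64_t vmem_free, uint64_t vmem_alloc, int shift)
{
        /* Positive: enough free vmem; negative: reclaim should run. */
        return ((int64_t)vmem_free - (int64_t)(vmem_alloc >> shift));
}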
 
-       if (arc_size > arc_c)
-               (void) arc_adjust();
+/*
+ * Determine if the system is under memory pressure and is asking
+ * to reclaim memory. A return value of B_TRUE indicates that the system
+ * is under memory pressure and that the arc should adjust accordingly.
+ */
+static boolean_t
+arc_reclaim_needed(void)
+{
+       return (arc_available_memory() < 0);
 }
 
 static void
-arc_kmem_reap_now(arc_reclaim_strategy_t strat, uint64_t bytes)
+arc_kmem_reap_now(void)
 {
        size_t                  i;
        kmem_cache_t            *prev_cache = NULL;
        kmem_cache_t            *prev_data_cache = NULL;
        extern kmem_cache_t     *zio_buf_cache[];
        extern kmem_cache_t     *zio_data_buf_cache[];
+       extern kmem_cache_t     *range_seg_cache;
 
+#ifdef _KERNEL
        if ((arc_meta_used >= arc_meta_limit) && zfs_arc_meta_prune) {
                /*
                 * We are exceeding our meta-data cache limit.
                 * Prune some entries to release holds on meta-data.
                 */
-               arc_prune(zfs_arc_meta_prune);
+               arc_prune_async(zfs_arc_meta_prune);
        }
-
+#if defined(_ILP32)
        /*
-        * An aggressive reclamation will shrink the cache size as well as
-        * reap free buffers from the arc kmem caches.
+        * Reclaim unused memory from all kmem caches.
         */
-       if (strat == ARC_RECLAIM_AGGR)
-               arc_shrink(bytes);
+       kmem_reap();
+#endif
+#endif
 
        for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
+#if defined(_ILP32)
+               /* reach upper limit of cache size on 32-bit */
+               if (zio_buf_cache[i] == NULL)
+                       break;
+#endif
                if (zio_buf_cache[i] != prev_cache) {
                        prev_cache = zio_buf_cache[i];
                        kmem_cache_reap_now(zio_buf_cache[i]);
@@ -2995,20 +4905,28 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat, uint64_t bytes)
                        kmem_cache_reap_now(zio_data_buf_cache[i]);
                }
        }
-
        kmem_cache_reap_now(buf_cache);
        kmem_cache_reap_now(hdr_full_cache);
        kmem_cache_reap_now(hdr_l2only_cache);
+       kmem_cache_reap_now(range_seg_cache);
+
+       if (zio_arena != NULL) {
+               /*
+                * Ask the vmem arena to reclaim unused memory from its
+                * quantum caches.
+                */
+               vmem_qcache_reap(zio_arena);
+       }
 }
 
 /*
- * Threads can block in arc_get_data_buf() waiting for this thread to evict
+ * Threads can block in arc_get_data_impl() waiting for this thread to evict
  * enough data and signal them to proceed. When this happens, the threads in
- * arc_get_data_buf() are sleeping while holding the hash lock for their
+ * arc_get_data_impl() are sleeping while holding the hash lock for their
  * particular arc header. Thus, we must be careful to never sleep on a
  * hash lock in this thread. This is to prevent the following deadlock:
  *
- *  - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L",
+ *  - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L",
  *    waiting for the reclaim thread to signal it.
  *
  *  - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
@@ -3018,108 +4936,20 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat, uint64_t bytes)
  * using mutex_tryenter() from arc_reclaim_thread().
  */
 static void
-arc_adapt_thread(void)
+arc_reclaim_thread(void *unused)
 {
+       fstrans_cookie_t        cookie = spl_fstrans_mark();
+       hrtime_t                growtime = 0;
        callb_cpr_t             cpr;
-       fstrans_cookie_t        cookie;
-       uint64_t                arc_evicted;
 
        CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
 
-       cookie = spl_fstrans_mark();
        mutex_enter(&arc_reclaim_lock);
-       while (arc_reclaim_thread_exit == 0) {
-#ifndef _KERNEL
-               arc_reclaim_strategy_t  last_reclaim = ARC_RECLAIM_CONS;
-
-               mutex_exit(&arc_reclaim_lock);
-               if (spa_get_random(100) == 0) {
-
-                       if (arc_no_grow) {
-                               if (last_reclaim == ARC_RECLAIM_CONS) {
-                                       last_reclaim = ARC_RECLAIM_AGGR;
-                               } else {
-                                       last_reclaim = ARC_RECLAIM_CONS;
-                               }
-                       } else {
-                               arc_no_grow = TRUE;
-                               last_reclaim = ARC_RECLAIM_AGGR;
-                               membar_producer();
-                       }
-
-                       /* reset the growth delay for every reclaim */
-                       arc_grow_time = ddi_get_lbolt() +
-                           (zfs_arc_grow_retry * hz);
-
-                       arc_kmem_reap_now(last_reclaim, 0);
-                       arc_warm = B_TRUE;
-               }
-#else /* _KERNEL */
-               mutex_exit(&arc_reclaim_lock);
-#endif /* !_KERNEL */
-
-               /* No recent memory pressure allow the ARC to grow. */
-               if (arc_no_grow &&
-                   ddi_time_after_eq(ddi_get_lbolt(), arc_grow_time))
-                       arc_no_grow = FALSE;
-
-               arc_evicted = arc_adjust();
-
-               /*
-                * We're either no longer overflowing, or we
-                * can't evict anything more, so we should wake
-                * up any threads before we go to sleep.
-                */
-               if (arc_size <= arc_c || arc_evicted == 0)
-                       cv_broadcast(&arc_reclaim_waiters_cv);
-
-               mutex_enter(&arc_reclaim_lock);
-
-               /* block until needed, or one second, whichever is shorter */
-               CALLB_CPR_SAFE_BEGIN(&cpr);
-               (void) cv_timedwait_interruptible(&arc_reclaim_thread_cv,
-                   &arc_reclaim_lock, (ddi_get_lbolt() + hz));
-               CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
-
-
-               /* Allow the module options to be changed */
-               if (zfs_arc_max > 64 << 20 &&
-                   zfs_arc_max < physmem * PAGESIZE &&
-                   zfs_arc_max != arc_c_max)
-                       arc_c_max = zfs_arc_max;
-
-               if (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT &&
-                   zfs_arc_min <= arc_c_max &&
-                   zfs_arc_min != arc_c_min)
-                       arc_c_min = zfs_arc_min;
-
-               if (zfs_arc_meta_limit > 0 &&
-                   zfs_arc_meta_limit <= arc_c_max &&
-                   zfs_arc_meta_limit != arc_meta_limit)
-                       arc_meta_limit = zfs_arc_meta_limit;
-       }
-
-       arc_reclaim_thread_exit = 0;
-       cv_broadcast(&arc_reclaim_thread_cv);
-       CALLB_CPR_EXIT(&cpr);           /* drops arc_reclaim_lock */
-       spl_fstrans_unmark(cookie);
-       thread_exit();
-}
-
-static void
-arc_user_evicts_thread(void)
-{
-       callb_cpr_t cpr;
-       fstrans_cookie_t        cookie;
-
-       CALLB_CPR_INIT(&cpr, &arc_user_evicts_lock, callb_generic_cpr, FTAG);
-
-       cookie = spl_fstrans_mark();
-       mutex_enter(&arc_user_evicts_lock);
-       while (!arc_user_evicts_thread_exit) {
-               mutex_exit(&arc_user_evicts_lock);
-
-               arc_do_user_evicts();
+       while (!arc_reclaim_thread_exit) {
+               int64_t to_free;
+               uint64_t evicted = 0;
+               uint64_t need_free = arc_need_free;
+               arc_tuning_update();
 
                /*
                 * This is necessary in order for the mdb ::arc dcmd to
@@ -3132,24 +4962,89 @@ arc_user_evicts_thread(void)
                 * structures can be queried directly if more accurate
                 * information is needed.
                 */
+#ifndef __linux__
                if (arc_ksp != NULL)
                        arc_ksp->ks_update(arc_ksp, KSTAT_READ);
+#endif
+               mutex_exit(&arc_reclaim_lock);
+
+               /*
+                * We call arc_adjust() before (possibly) calling
+                * arc_kmem_reap_now(), so that we can wake up
+                * arc_get_data_buf() sooner.
+                */
+               evicted = arc_adjust();
+
+               int64_t free_memory = arc_available_memory();
+               if (free_memory < 0) {
+
+                       arc_no_grow = B_TRUE;
+                       arc_warm = B_TRUE;
 
-               mutex_enter(&arc_user_evicts_lock);
+                       /*
+                        * Wait at least zfs_grow_retry (default 5) seconds
+                        * before considering growing.
+                        */
+                       growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
+
+                       arc_kmem_reap_now();
+
+                       /*
+                        * If we are still low on memory, shrink the ARC
+                        * so that we have arc_shrink_min free space.
+                        */
+                       free_memory = arc_available_memory();
+
+                       to_free = (arc_c >> arc_shrink_shift) - free_memory;
+                       if (to_free > 0) {
+#ifdef _KERNEL
+                               to_free = MAX(to_free, need_free);
+#endif
+                               arc_shrink(to_free);
+                       }
+               } else if (free_memory < arc_c >> arc_no_grow_shift) {
+                       arc_no_grow = B_TRUE;
+               } else if (gethrtime() >= growtime) {
+                       arc_no_grow = B_FALSE;
+               }
+
+               mutex_enter(&arc_reclaim_lock);
 
                /*
-                * Block until signaled, or after one second (we need to
-                * call the arc's kstat update function regularly).
+                * If evicted is zero, we couldn't evict anything via
+                * arc_adjust(). This could be due to hash lock
+                * collisions, but more likely due to the majority of
+                * arc buffers being unevictable. Therefore, even if
+                * arc_size is above arc_c, another pass is unlikely to
+                * be helpful and could potentially cause us to enter an
+                * infinite loop.
                 */
-               CALLB_CPR_SAFE_BEGIN(&cpr);
-               (void) cv_timedwait_interruptible(&arc_user_evicts_cv,
-                   &arc_user_evicts_lock, ddi_get_lbolt() + hz);
-               CALLB_CPR_SAFE_END(&cpr, &arc_user_evicts_lock);
+               if (arc_size <= arc_c || evicted == 0) {
+                       /*
+                        * We're either no longer overflowing, or we
+                        * can't evict anything more, so we should wake
+                        * up any threads before we go to sleep and remove
+                        * the bytes we were working on from arc_need_free
+                        * since nothing more will be done here.
+                        */
+                       cv_broadcast(&arc_reclaim_waiters_cv);
+                       ARCSTAT_INCR(arcstat_need_free, -need_free);
+
+                       /*
+                        * Block until signaled, or after one second (we
+                        * might need to perform arc_kmem_reap_now()
+                        * even if we aren't being signalled)
+                        */
+                       CALLB_CPR_SAFE_BEGIN(&cpr);
+                       (void) cv_timedwait_sig_hires(&arc_reclaim_thread_cv,
+                           &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
+                       CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
+               }
        }
 
-       arc_user_evicts_thread_exit = FALSE;
-       cv_broadcast(&arc_user_evicts_cv);
-       CALLB_CPR_EXIT(&cpr);           /* drops arc_user_evicts_lock */
+       arc_reclaim_thread_exit = B_FALSE;
+       cv_broadcast(&arc_reclaim_thread_cv);
+       CALLB_CPR_EXIT(&cpr);           /* drops arc_reclaim_lock */
        spl_fstrans_unmark(cookie);
        thread_exit();
 }
@@ -3201,23 +5096,32 @@ arc_user_evicts_thread(void)
  *         increase this negative difference.
  */
 static uint64_t
-arc_evictable_memory(void) {
+arc_evictable_memory(void)
+{
        uint64_t arc_clean =
-           arc_mru->arcs_lsize[ARC_BUFC_DATA] +
-           arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
-           arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
-           arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
-       uint64_t ghost_clean =
-           arc_mru_ghost->arcs_lsize[ARC_BUFC_DATA] +
-           arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA] +
-           arc_mfu_ghost->arcs_lsize[ARC_BUFC_DATA] +
-           arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA];
+           refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
+           refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
+           refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
+           refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
        uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0);
 
-       if (arc_dirty >= arc_c_min)
-               return (ghost_clean + arc_clean);
+       /*
+        * Scale reported evictable memory in proportion to page cache, cap
+        * at specified min/max.
+        */
+#ifdef ZFS_GLOBAL_NODE_PAGE_STATE
+       uint64_t min = (ptob(global_node_page_state(NR_FILE_PAGES)) / 100) *
+           zfs_arc_pc_percent;
+#else
+       uint64_t min = (ptob(global_page_state(NR_FILE_PAGES)) / 100) *
+           zfs_arc_pc_percent;
+#endif
+       min = MAX(arc_c_min, MIN(arc_c_max, min));
 
-       return (ghost_clean + MAX((int64_t)arc_size - (int64_t)arc_c_min, 0));
+       if (arc_dirty >= min)
+               return (arc_clean);
+
+       return (MAX((int64_t)arc_size - (int64_t)min, 0));
 }
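/*
 * Worked example of the page-cache scaling above (illustrative values;
 * zfs_arc_pc_percent is a tunable): with 10 GiB of file pages and
 * zfs_arc_pc_percent = 30, min is 3 GiB, then clamped into
 * [arc_c_min, arc_c_max].  If the unevictable portion of the ARC
 * (arc_dirty) is at least that floor, only the clean bytes are reported;
 * otherwise arc_size minus the floor is reported.
 */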
 
 /*
@@ -3251,33 +5155,33 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
                return (SHRINK_STOP);
 
        /* Reclaim in progress */
-       if (mutex_tryenter(&arc_reclaim_lock) == 0)
-               return (SHRINK_STOP);
+       if (mutex_tryenter(&arc_reclaim_lock) == 0) {
+               ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
+               return (0);
+       }
 
        mutex_exit(&arc_reclaim_lock);
 
        /*
         * Evict the requested number of pages by shrinking arc_c the
-        * requested amount.  If there is nothing left to evict just
-        * reap whatever we can from the various arc slabs.
+        * requested amount.
         */
        if (pages > 0) {
-               arc_kmem_reap_now(ARC_RECLAIM_AGGR, ptob(sc->nr_to_scan));
-
+               arc_shrink(ptob(sc->nr_to_scan));
+               if (current_is_kswapd())
+                       arc_kmem_reap_now();
 #ifdef HAVE_SPLIT_SHRINKER_CALLBACK
-               pages = MAX(pages - btop(arc_evictable_memory()), 0);
+               pages = MAX((int64_t)pages -
+                   (int64_t)btop(arc_evictable_memory()), 0);
 #else
                pages = btop(arc_evictable_memory());
 #endif
-       } else {
-               arc_kmem_reap_now(ARC_RECLAIM_CONS, ptob(sc->nr_to_scan));
+               /*
+                * We've shrunk what we can, wake up threads.
+                */
+               cv_broadcast(&arc_reclaim_waiters_cv);
+       } else
                pages = SHRINK_STOP;
-       }
-
-       /*
-        * We've reaped what we can, wake up threads.
-        */
-       cv_broadcast(&arc_reclaim_waiters_cv);
 
        /*
         * When direct reclaim is observed it usually indicates a rapid
@@ -3290,7 +5194,7 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
                ARCSTAT_BUMP(arcstat_memory_indirect_count);
        } else {
                arc_no_grow = B_TRUE;
-               arc_grow_time = ddi_get_lbolt() + (zfs_arc_grow_retry * hz);
+               arc_kmem_reap_now();
                ARCSTAT_BUMP(arcstat_memory_direct_count);
        }
 
@@ -3303,13 +5207,16 @@ SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
 
 /*
  * Adapt arc info given the number of bytes we are trying to add and
- * the state that we are comming from.  This function is only called
+ * the state that we are coming from.  This function is only called
  * when we are adding new content to the cache.
  */
 static void
 arc_adapt(int bytes, arc_state_t *state)
 {
        int mult;
+       uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
+       int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
+       int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
 
        if (state == arc_l2c_only)
                return;
@@ -3324,27 +5231,28 @@ arc_adapt(int bytes, arc_state_t *state)
         *        target size of the MRU list.
         */
        if (state == arc_mru_ghost) {
-               mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
-                   1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
-
+               mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
                if (!zfs_arc_p_dampener_disable)
                        mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
 
-               arc_p = MIN(arc_c, arc_p + bytes * mult);
+               arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
        } else if (state == arc_mfu_ghost) {
                uint64_t delta;
 
-               mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
-                   1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
-
+               mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
                if (!zfs_arc_p_dampener_disable)
                        mult = MIN(mult, 10);
 
                delta = MIN(bytes * mult, arc_p);
-               arc_p = MAX(0, arc_p - delta);
+               arc_p = MAX(arc_p_min, arc_p - delta);
        }
        ASSERT((int64_t)arc_p >= 0);
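        /*
         * Worked example of the ghost-hit adjustment above (illustrative
         * sizes): on an MRU-ghost hit with mfug_size three times mrug_size,
         * mult is 3, so a 128 KiB hit raises arc_p by 384 KiB, capped at
         * arc_c - arc_p_min; an MFU-ghost hit with the ratio reversed
         * lowers arc_p by the same amount, but never below arc_p_min.
         * With the dampener enabled, mult itself is capped at 10.
         */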
 
+       if (arc_reclaim_needed()) {
+               cv_signal(&arc_reclaim_thread_cv);
+               return;
+       }
+
        if (arc_no_grow)
                return;
 
@@ -3355,7 +5263,7 @@ arc_adapt(int bytes, arc_state_t *state)
         * If we're within (2 * maxblocksize) bytes of the target
         * cache size, increment the target cache size
         */
-       VERIFY3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
+       ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
        if (arc_size >= arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
                atomic_add_64(&arc_c, (int64_t)bytes);
                if (arc_c > arc_c_max)
@@ -3382,19 +5290,45 @@ arc_is_overflowing(void)
        return (arc_size >= arc_c + overflow);
 }
 
+static abd_t *
+arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
+{
+       arc_buf_contents_t type = arc_buf_type(hdr);
+
+       arc_get_data_impl(hdr, size, tag);
+       if (type == ARC_BUFC_METADATA) {
+               return (abd_alloc(size, B_TRUE));
+       } else {
+               ASSERT(type == ARC_BUFC_DATA);
+               return (abd_alloc(size, B_FALSE));
+       }
+}
+
+static void *
+arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
+{
+       arc_buf_contents_t type = arc_buf_type(hdr);
+
+       arc_get_data_impl(hdr, size, tag);
+       if (type == ARC_BUFC_METADATA) {
+               return (zio_buf_alloc(size));
+       } else {
+               ASSERT(type == ARC_BUFC_DATA);
+               return (zio_data_buf_alloc(size));
+       }
+}
+
 /*
- * The buffer, supplied as the first argument, needs a data block. If we
- * are hitting the hard limit for the cache size, we must sleep, waiting
- * for the eviction thread to catch up. If we're past the target size
- * but below the hard limit, we'll only signal the reclaim thread and
- * continue on.
+ * Allocate a block and return it to the caller. If we are hitting the
+ * hard limit for the cache size, we must sleep, waiting for the eviction
+ * thread to catch up. If we're past the target size but below the hard
+ * limit, we'll only signal the reclaim thread and continue on.
  */
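/*
 * Restating the policy above as a threshold ladder (illustrative summary,
 * not additional code): below arc_c the allocation simply proceeds; past
 * arc_c but below the overflow limit the reclaim thread is signalled and
 * the allocation still proceeds; at or beyond the overflow limit the
 * caller waits on arc_reclaim_waiters_cv until enough has been evicted.
 */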
 static void
-arc_get_data_buf(arc_buf_t *buf)
+arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 {
-       arc_state_t             *state = buf->b_hdr->b_l1hdr.b_state;
-       uint64_t                size = buf->b_hdr->b_size;
-       arc_buf_contents_t      type = arc_buf_type(buf->b_hdr);
+       arc_state_t *state = hdr->b_l1hdr.b_state;
+       arc_buf_contents_t type = arc_buf_type(hdr);
 
        arc_adapt(size, state);
 
@@ -3434,12 +5368,10 @@ arc_get_data_buf(arc_buf_t *buf)
                mutex_exit(&arc_reclaim_lock);
        }
 
+       VERIFY3U(hdr->b_type, ==, type);
        if (type == ARC_BUFC_METADATA) {
-               buf->b_data = zio_buf_alloc(size);
                arc_space_consume(size, ARC_SPACE_META);
        } else {
-               ASSERT(type == ARC_BUFC_DATA);
-               buf->b_data = zio_data_buf_alloc(size);
                arc_space_consume(size, ARC_SPACE_DATA);
        }
 
@@ -3447,10 +5379,9 @@ arc_get_data_buf(arc_buf_t *buf)
         * Update the state size.  Note that ghost states have a
         * "ghost size" and so don't need to be updated.
         */
-       if (!GHOST_STATE(buf->b_hdr->b_l1hdr.b_state)) {
-               arc_buf_hdr_t *hdr = buf->b_hdr;
+       if (!GHOST_STATE(state)) {
 
-               atomic_add_64(&hdr->b_l1hdr.b_state->arcs_size, size);
+               (void) refcount_add_many(&state->arcs_size, size, tag);
 
                /*
                 * If this is reached via arc_read, the link is
@@ -3463,19 +5394,70 @@ arc_get_data_buf(arc_buf_t *buf)
                 */
                if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
                        ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-                       atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type],
-                           size);
+                       (void) refcount_add_many(&state->arcs_esize[type],
+                           size, tag);
                }
+
                /*
                 * If we are growing the cache, and we are adding anonymous
                 * data, and we have outgrown arc_p, update arc_p
                 */
                if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
-                   arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
+                   (refcount_count(&arc_anon->arcs_size) +
+                   refcount_count(&arc_mru->arcs_size) > arc_p))
                        arc_p = MIN(arc_c, arc_p + size);
        }
 }
 
+static void
+arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
+{
+       arc_free_data_impl(hdr, size, tag);
+       abd_free(abd);
+}
+
+static void
+arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
+{
+       arc_buf_contents_t type = arc_buf_type(hdr);
+
+       arc_free_data_impl(hdr, size, tag);
+       if (type == ARC_BUFC_METADATA) {
+               zio_buf_free(buf, size);
+       } else {
+               ASSERT(type == ARC_BUFC_DATA);
+               zio_data_buf_free(buf, size);
+       }
+}
+
+/*
+ * Free the arc data buffer.
+ */
+static void
+arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
+{
+       arc_state_t *state = hdr->b_l1hdr.b_state;
+       arc_buf_contents_t type = arc_buf_type(hdr);
+
+       /* protected by hash lock, if in the hash table */
+       if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
+               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT(state != arc_anon && state != arc_l2c_only);
+
+               (void) refcount_remove_many(&state->arcs_esize[type],
+                   size, tag);
+       }
+       (void) refcount_remove_many(&state->arcs_size, size, tag);
+
+       VERIFY3U(hdr->b_type, ==, type);
+       if (type == ARC_BUFC_METADATA) {
+               arc_space_return(size, ARC_SPACE_META);
+       } else {
+               ASSERT(type == ARC_BUFC_DATA);
+               arc_space_return(size, ARC_SPACE_DATA);
+       }
+}
+
 /*
  * This routine is called whenever a buffer is accessed.
  * NOTE: the hash lock is dropped in this function.
@@ -3517,7 +5499,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                                ASSERT(multilist_link_active(
                                    &hdr->b_l1hdr.b_arc_node));
                        } else {
-                               hdr->b_flags &= ~ARC_FLAG_PREFETCH;
+                               arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
                                atomic_inc_32(&hdr->b_l1hdr.b_mru_hits);
                                ARCSTAT_BUMP(arcstat_mru_hits);
                        }
@@ -3554,7 +5536,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                if (HDR_PREFETCH(hdr)) {
                        new_state = arc_mru;
                        if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
-                               hdr->b_flags &= ~ARC_FLAG_PREFETCH;
+                               arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
                        DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
                } else {
                        new_state = arc_mfu;
@@ -3621,23 +5603,23 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
        }
 }
 
-/* a generic arc_done_func_t which you can use */
+/* a generic arc_read_done_func_t which you can use */
 /* ARGSUSED */
 void
-arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
+arc_bcopy_func(zio_t *zio, int error, arc_buf_t *buf, void *arg)
 {
-       if (zio == NULL || zio->io_error == 0)
-               bcopy(buf->b_data, arg, buf->b_hdr->b_size);
-       VERIFY(arc_buf_remove_ref(buf, arg));
+       if (error == 0)
+               bcopy(buf->b_data, arg, arc_buf_size(buf));
+       arc_buf_destroy(buf, arg);
 }
 
-/* a generic arc_done_func_t */
+/* a generic arc_read_done_func_t */
 void
-arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
+arc_getbuf_func(zio_t *zio, int error, arc_buf_t *buf, void *arg)
 {
        arc_buf_t **bufp = arg;
-       if (zio && zio->io_error) {
-               VERIFY(arc_buf_remove_ref(buf, arg));
+       if (error != 0) {
+               arc_buf_destroy(buf, arg);
                *bufp = NULL;
        } else {
                *bufp = buf;
@@ -3645,18 +5627,33 @@ arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
        }
 }
 
+static void
+arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
+{
+       if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
+               ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
+               ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
+       } else {
+               if (HDR_COMPRESSION_ENABLED(hdr)) {
+                       ASSERT3U(arc_hdr_get_compress(hdr), ==,
+                           BP_GET_COMPRESS(bp));
+               }
+               ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
+               ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
+               ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
+       }
+}
+
 static void
 arc_read_done(zio_t *zio)
 {
-       arc_buf_hdr_t   *hdr;
-       arc_buf_t       *buf;
-       arc_buf_t       *abuf;  /* buffer we're assigning to callback */
+       blkptr_t        *bp = zio->io_bp;
+       arc_buf_hdr_t   *hdr = zio->io_private;
        kmutex_t        *hash_lock = NULL;
-       arc_callback_t  *callback_list, *acb;
-       int             freeable = FALSE;
-
-       buf = zio->io_private;
-       hdr = buf->b_hdr;
+       arc_callback_t  *callback_list;
+       arc_callback_t  *acb;
+       boolean_t       freeable = B_FALSE;
+       boolean_t       no_zio_error = (zio->io_error == 0);
 
        /*
         * The hdr was inserted into hash-table and removed from lists
@@ -3675,37 +5672,56 @@ arc_read_done(zio_t *zio)
                ASSERT3U(hdr->b_dva.dva_word[1], ==,
                    BP_IDENTITY(zio->io_bp)->dva_word[1]);
 
-               found = buf_hash_find(hdr->b_spa, zio->io_bp,
-                   &hash_lock);
+               found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
 
-               ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
-                   hash_lock == NULL) ||
-                   (found == hdr &&
+               ASSERT((found == hdr &&
                    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
                    (found == hdr && HDR_L2_READING(hdr)));
+               ASSERT3P(hash_lock, !=, NULL);
        }
 
-       hdr->b_flags &= ~ARC_FLAG_L2_EVICTED;
-       if (l2arc_noprefetch && HDR_PREFETCH(hdr))
-               hdr->b_flags &= ~ARC_FLAG_L2CACHE;
+       if (BP_IS_PROTECTED(bp)) {
+               hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
+               hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
+               zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
+                   hdr->b_crypt_hdr.b_iv);
 
-       /* byteswap if necessary */
-       callback_list = hdr->b_l1hdr.b_acb;
-       ASSERT(callback_list != NULL);
-       if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
-               dmu_object_byteswap_t bswap =
-                   DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
-               if (BP_GET_LEVEL(zio->io_bp) > 0)
-                   byteswap_uint64_array(buf->b_data, hdr->b_size);
-               else
-                   dmu_ot_byteswap[bswap].ob_func(buf->b_data, hdr->b_size);
+               if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
+                       void *tmpbuf;
+
+                       tmpbuf = abd_borrow_buf_copy(zio->io_abd,
+                           sizeof (zil_chain_t));
+                       zio_crypt_decode_mac_zil(tmpbuf,
+                           hdr->b_crypt_hdr.b_mac);
+                       abd_return_buf(zio->io_abd, tmpbuf,
+                           sizeof (zil_chain_t));
+               } else {
+                       zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
+               }
        }
 
-       arc_cksum_compute(buf, B_FALSE);
-       arc_buf_watch(buf);
+       if (no_zio_error) {
+               /* byteswap if necessary */
+               if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
+                       if (BP_GET_LEVEL(zio->io_bp) > 0) {
+                               hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
+                       } else {
+                               hdr->b_l1hdr.b_byteswap =
+                                   DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
+                       }
+               } else {
+                       hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
+               }
+       }
+
+       arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
+       if (l2arc_noprefetch && HDR_PREFETCH(hdr))
+               arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
 
-       if (hash_lock && zio->io_error == 0 &&
-           hdr->b_l1hdr.b_state == arc_anon) {
+       callback_list = hdr->b_l1hdr.b_acb;
+       ASSERT3P(callback_list, !=, NULL);
+
+       if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) {
                /*
                 * Only call arc_access on anonymous buffers.  This is because
                 * if we've issued an I/O for an evicted buffer, we've already
@@ -3715,32 +5731,65 @@ arc_read_done(zio_t *zio)
                arc_access(hdr, hash_lock);
        }
 
-       /* create copies of the data buffer for the callers */
-       abuf = buf;
-       for (acb = callback_list; acb; acb = acb->acb_next) {
-               if (acb->acb_done) {
-                       if (abuf == NULL) {
-                               ARCSTAT_BUMP(arcstat_duplicate_reads);
-                               abuf = arc_buf_clone(buf);
+       /*
+        * If a read request has a callback (i.e. acb_done is not NULL), then we
+        * make a buf containing the data according to the parameters which were
+        * passed in. The implementation of arc_buf_alloc_impl() ensures that we
+        * aren't needlessly decompressing the data multiple times.
+        */
+       int callback_cnt = 0;
+       for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
+               if (!acb->acb_done)
+                       continue;
+
+               /* This is a demand read since prefetches don't use callbacks */
+               callback_cnt++;
+
+               int error = arc_buf_alloc_impl(hdr, zio->io_spa,
+                   zio->io_bookmark.zb_objset, acb->acb_private,
+                   acb->acb_encrypted, acb->acb_compressed, acb->acb_noauth,
+                   no_zio_error, &acb->acb_buf);
+
+               /*
+                * assert non-speculative zios didn't fail because an
+                * encryption key wasn't loaded
+                */
+               ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
+                   error == 0 || error != ENOENT);
+
+               /*
+                * If we failed to decrypt, report an error now (as the zio
+                * layer would have done if it had done the transforms).
+                */
+               if (error == ECKSUM) {
+                       ASSERT(BP_IS_PROTECTED(bp));
+                       error = SET_ERROR(EIO);
+                       spa_log_error(zio->io_spa, &zio->io_bookmark);
+                       if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
+                               zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
+                                   zio->io_spa, NULL, &zio->io_bookmark, zio,
+                                   0, 0);
                        }
-                       acb->acb_buf = abuf;
-                       abuf = NULL;
+               }
+
+               if (no_zio_error) {
+                       zio->io_error = error;
                }
        }
        hdr->b_l1hdr.b_acb = NULL;
-       hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
-       ASSERT(!HDR_BUF_AVAILABLE(hdr));
-       if (abuf == buf) {
-               ASSERT(buf->b_efunc == NULL);
-               ASSERT(hdr->b_l1hdr.b_datacnt == 1);
-               hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
+       arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
+       if (callback_cnt == 0) {
+               ASSERT(HDR_PREFETCH(hdr) || HDR_HAS_RABD(hdr));
+               ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
        }
 
        ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
            callback_list != NULL);
 
-       if (zio->io_error != 0) {
-               hdr->b_flags |= ARC_FLAG_IO_ERROR;
+       if (no_zio_error) {
+               arc_hdr_verify(hdr, zio->io_bp);
+       } else {
+               arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
                if (hdr->b_l1hdr.b_state != arc_anon)
                        arc_change_state(arc_anon, hdr, hash_lock);
                if (HDR_IN_HASH_TABLE(hdr))
@@ -3770,8 +5819,10 @@ arc_read_done(zio_t *zio)
 
        /* execute each callback and free its structure */
        while ((acb = callback_list) != NULL) {
-               if (acb->acb_done)
-                       acb->acb_done(zio, acb->acb_buf, acb->acb_private);
+               if (acb->acb_done) {
+                       acb->acb_done(zio, zio->io_error, acb->acb_buf,
+                           acb->acb_private);
+               }
 
                if (acb->acb_zio_dummy != NULL) {
                        acb->acb_zio_dummy->io_error = zio->io_error;
@@ -3805,15 +5856,19 @@ arc_read_done(zio_t *zio)
  * for readers of this block.
  */
 int
-arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
-    void *private, zio_priority_t priority, int zio_flags,
-    arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
+arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
+    arc_read_done_func_t *done, void *private, zio_priority_t priority,
+    int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
 {
        arc_buf_hdr_t *hdr = NULL;
-       arc_buf_t *buf = NULL;
        kmutex_t *hash_lock = NULL;
        zio_t *rzio;
        uint64_t guid = spa_load_guid(spa);
+       boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
+       boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
+           (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
+       boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
+           (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
        int rc = 0;
 
        ASSERT(!BP_IS_EMBEDDED(bp) ||
@@ -3828,12 +5883,51 @@ top:
                hdr = buf_hash_find(guid, bp, &hash_lock);
        }
 
-       if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_datacnt > 0) {
-
+       /*
+        * Determine if we have an L1 cache hit or a cache miss. For simplicity
+        * we maintain encrypted data separately from compressed / uncompressed
+        * data. If the user is requesting raw encrypted data and we don't have
+        * that in the header we will read from disk to guarantee that we can
+        * get it even if the encryption keys aren't loaded.
+        */
+       if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
+           (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
+               arc_buf_t *buf = NULL;
                *arc_flags |= ARC_FLAG_CACHED;
 
                if (HDR_IO_IN_PROGRESS(hdr)) {
 
+                       if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
+                           priority == ZIO_PRIORITY_SYNC_READ) {
+                               /*
+                                * This sync read must wait for an
+                                * in-progress async read (e.g. a predictive
+                                * prefetch).  Async reads are queued
+                                * separately at the vdev_queue layer, so
+                                * this is a form of priority inversion.
+                                * Ideally, we would "inherit" the demand
+                                * i/o's priority by moving the i/o from
+                                * the async queue to the synchronous queue,
+                                * but there is currently no mechanism to do
+                                * so.  Track this so that we can evaluate
+                                * the magnitude of this potential performance
+                                * problem.
+                                *
+                                * Note that if the prefetch i/o is already
+                                * active (has been issued to the device),
+                                * the prefetch improved performance, because
+                                * we issued it sooner than we would have
+                                * without the prefetch.
+                                */
+                               DTRACE_PROBE1(arc__sync__wait__for__async,
+                                   arc_buf_hdr_t *, hdr);
+                               ARCSTAT_BUMP(arcstat_sync_wait_for_async);
+                       }
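                        /*
                         * For reference: the counter bumped above is exported
                         * together with the other ARC kstats, so on Linux the
                         * frequency of this priority inversion should be
                         * observable with something like:
                         *
                         *   grep sync_wait_for_async \
                         *       /proc/spl/kstat/zfs/arcstats
                         */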
+                       if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
+                               arc_hdr_clear_flags(hdr,
+                                   ARC_FLAG_PREDICTIVE_PREFETCH);
+                       }
+
                        if (*arc_flags & ARC_FLAG_WAIT) {
                                cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
                                mutex_exit(hash_lock);
@@ -3842,20 +5936,20 @@ top:
                        ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
 
                        if (done) {
-                               arc_callback_t  *acb = NULL;
+                               arc_callback_t *acb = NULL;
 
                                acb = kmem_zalloc(sizeof (arc_callback_t),
                                    KM_SLEEP);
                                acb->acb_done = done;
                                acb->acb_private = private;
+                               acb->acb_compressed = compressed_read;
                                if (pio != NULL)
                                        acb->acb_zio_dummy = zio_null(pio,
                                            spa, NULL, NULL, NULL, zio_flags);
 
-                               ASSERT(acb->acb_done != NULL);
+                               ASSERT3P(acb->acb_done, !=, NULL);
                                acb->acb_next = hdr->b_l1hdr.b_acb;
                                hdr->b_l1hdr.b_acb = acb;
-                               add_reference(hdr, hash_lock, private);
                                mutex_exit(hash_lock);
                                goto out;
                        }
@@ -3867,32 +5961,37 @@ top:
                    hdr->b_l1hdr.b_state == arc_mfu);
 
                if (done) {
-                       add_reference(hdr, hash_lock, private);
-                       /*
-                        * If this block is already in use, create a new
-                        * copy of the data so that we will be guaranteed
-                        * that arc_release() will always succeed.
-                        */
-                       buf = hdr->b_l1hdr.b_buf;
-                       ASSERT(buf);
-                       ASSERT(buf->b_data);
-                       if (HDR_BUF_AVAILABLE(hdr)) {
-                               ASSERT(buf->b_efunc == NULL);
-                               hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
-                       } else {
-                               buf = arc_buf_clone(buf);
+                       if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
+                               /*
+                                * This is a demand read which does not have to
+                                * wait for i/o because we did a predictive
+                                * prefetch i/o for it, which has completed.
+                                */
+                               DTRACE_PROBE1(
+                                   arc__demand__hit__predictive__prefetch,
+                                   arc_buf_hdr_t *, hdr);
+                               ARCSTAT_BUMP(
+                                   arcstat_demand_hit_predictive_prefetch);
+                               arc_hdr_clear_flags(hdr,
+                                   ARC_FLAG_PREDICTIVE_PREFETCH);
                        }
+                       ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));
+
+                       /* Get a buf with the desired data in it. */
+                       rc = arc_buf_alloc_impl(hdr, spa, zb->zb_objset,
+                           private, encrypted_read, compressed_read,
+                           noauth_read, B_TRUE, &buf);
 
+                       ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
+                           rc == 0 || rc != ENOENT);
                } else if (*arc_flags & ARC_FLAG_PREFETCH &&
                    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
-                       hdr->b_flags |= ARC_FLAG_PREFETCH;
+                       arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
                }
                DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
                arc_access(hdr, hash_lock);
                if (*arc_flags & ARC_FLAG_L2CACHE)
-                       hdr->b_flags |= ARC_FLAG_L2CACHE;
-               if (*arc_flags & ARC_FLAG_L2COMPRESS)
-                       hdr->b_flags |= ARC_FLAG_L2COMPRESS;
+                       arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
                mutex_exit(hash_lock);
                ARCSTAT_BUMP(arcstat_hits);
                ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
@@ -3900,29 +5999,23 @@ top:
                    data, metadata, hits);
 
                if (done)
-                       done(NULL, buf, private);
+                       done(NULL, rc, buf, private);
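                /*
                 * Note that the done callback now also receives the read
                 * error. Judging from the call sites in this file, a
                 * consumer's arc_read_done_func_t is expected to look
                 * roughly like the following (my_read_done is a hypothetical
                 * name, not an existing callback):
                 *
                 *   static void
                 *   my_read_done(zio_t *zio, int err, arc_buf_t *buf,
                 *       void *priv)
                 *   {
                 *           if (err != 0) {
                 *                   ... handle failure (buf may be NULL) ...
                 *                   return;
                 *           }
                 *           ... consume buf ...
                 *   }
                 */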
        } else {
-               uint64_t size = BP_GET_LSIZE(bp);
+               uint64_t lsize = BP_GET_LSIZE(bp);
+               uint64_t psize = BP_GET_PSIZE(bp);
                arc_callback_t *acb;
                vdev_t *vd = NULL;
                uint64_t addr = 0;
                boolean_t devw = B_FALSE;
-               enum zio_compress b_compress = ZIO_COMPRESS_OFF;
-               int32_t b_asize = 0;
+               uint64_t size;
+               void *hdr_abd;
 
                /*
                 * Gracefully handle a damaged logical block size as a
-                * checksum error by passing a dummy zio to the done callback.
+                * checksum error.
                 */
-               if (size > spa_maxblocksize(spa)) {
-                       if (done) {
-                               rzio = zio_null(pio, spa, NULL,
-                                   NULL, NULL, zio_flags);
-                               rzio->io_error = ECKSUM;
-                               done(rzio, buf, private);
-                               zio_nowait(rzio);
-                       }
-                       rc = ECKSUM;
+               if (lsize > spa_maxblocksize(spa)) {
+                       rc = SET_ERROR(ECKSUM);
                        goto out;
                }
 
@@ -3930,8 +6023,10 @@ top:
                        /* this block is not in the cache */
                        arc_buf_hdr_t *exists = NULL;
                        arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
-                       buf = arc_buf_alloc(spa, size, private, type);
-                       hdr = buf->b_hdr;
+                       hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
+                           BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), type,
+                           encrypted_read);
+
                        if (!BP_IS_EMBEDDED(bp)) {
                                hdr->b_dva = *BP_IDENTITY(bp);
                                hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
@@ -3941,76 +6036,110 @@ top:
                                /* somebody beat us to the hash insert */
                                mutex_exit(hash_lock);
                                buf_discard_identity(hdr);
-                               (void) arc_buf_remove_ref(buf, private);
+                               arc_hdr_destroy(hdr);
                                goto top; /* restart the IO request */
                        }
-
-                       /* if this is a prefetch, we don't have a reference */
-                       if (*arc_flags & ARC_FLAG_PREFETCH) {
-                               (void) remove_reference(hdr, hash_lock,
-                                   private);
-                               hdr->b_flags |= ARC_FLAG_PREFETCH;
-                       }
-                       if (*arc_flags & ARC_FLAG_L2CACHE)
-                               hdr->b_flags |= ARC_FLAG_L2CACHE;
-                       if (*arc_flags & ARC_FLAG_L2COMPRESS)
-                               hdr->b_flags |= ARC_FLAG_L2COMPRESS;
-                       if (BP_GET_LEVEL(bp) > 0)
-                               hdr->b_flags |= ARC_FLAG_INDIRECT;
                } else {
                        /*
-                        * This block is in the ghost cache. If it was L2-only
-                        * (and thus didn't have an L1 hdr), we realloc the
-                        * header to add an L1 hdr.
+                        * This block is in the ghost cache or encrypted data
+                        * was requested and we didn't have it. If it was
+                        * L2-only (and thus didn't have an L1 hdr),
+                        * we realloc the header to add an L1 hdr.
                         */
                        if (!HDR_HAS_L1HDR(hdr)) {
                                hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
                                    hdr_full_cache);
                        }
 
-                       ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
-                       ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-                       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-                       ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+                       if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
+                               ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+                               ASSERT(!HDR_HAS_RABD(hdr));
+                               ASSERT(!HDR_IO_IN_PROGRESS(hdr));
+                               ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+                               ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+                               ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+                       } else if (HDR_IO_IN_PROGRESS(hdr)) {
+                               /*
+                                * If this header already had an IO in progress
+                                * and we are performing another IO to fetch
+                                * encrypted data, we must wait until the first
+                                * IO completes so as not to confuse
+                                * arc_read_done(). This should be very rare
+                                * and so the performance impact shouldn't
+                                * matter.
+                                */
+                               cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
+                               mutex_exit(hash_lock);
+                               goto top;
+                       }
 
-                       /* if this is a prefetch, we don't have a reference */
-                       if (*arc_flags & ARC_FLAG_PREFETCH)
-                               hdr->b_flags |= ARC_FLAG_PREFETCH;
-                       else
-                               add_reference(hdr, hash_lock, private);
-                       if (*arc_flags & ARC_FLAG_L2CACHE)
-                               hdr->b_flags |= ARC_FLAG_L2CACHE;
-                       if (*arc_flags & ARC_FLAG_L2COMPRESS)
-                               hdr->b_flags |= ARC_FLAG_L2COMPRESS;
-                       buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
-                       buf->b_hdr = hdr;
-                       buf->b_data = NULL;
-                       buf->b_efunc = NULL;
-                       buf->b_private = NULL;
-                       buf->b_next = NULL;
-                       hdr->b_l1hdr.b_buf = buf;
-                       ASSERT0(hdr->b_l1hdr.b_datacnt);
-                       hdr->b_l1hdr.b_datacnt = 1;
-                       arc_get_data_buf(buf);
+                       /*
+                        * This is a delicate dance that we play here.
+                        * This hdr might be in the ghost list so we access
+                        * it to move it out of the ghost list before we
+                        * initiate the read. If it's a prefetch then
+                        * it won't have a callback so we'll remove the
+                        * reference that arc_buf_alloc_impl() created. We
+                        * do this after we've called arc_access() to
+                        * avoid hitting an assert in remove_reference().
+                        */
                        arc_access(hdr, hash_lock);
+                       arc_hdr_alloc_abd(hdr, encrypted_read);
+               }
+
+               if (encrypted_read) {
+                       ASSERT(HDR_HAS_RABD(hdr));
+                       size = HDR_GET_PSIZE(hdr);
+                       hdr_abd = hdr->b_crypt_hdr.b_rabd;
+                       zio_flags |= ZIO_FLAG_RAW;
+               } else {
+                       ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+                       size = arc_hdr_size(hdr);
+                       hdr_abd = hdr->b_l1hdr.b_pabd;
+
+                       if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
+                               zio_flags |= ZIO_FLAG_RAW_COMPRESS;
+                       }
+
+                       /*
+                        * For authenticated bp's, we do not ask the ZIO layer
+                        * to authenticate them since this will cause the entire
+                        * IO to fail if the key isn't loaded. Instead, we
+                        * defer authentication until arc_buf_fill(), which will
+                        * verify the data when the key is available.
+                        */
+                       if (BP_IS_AUTHENTICATED(bp))
+                               zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
                }
 
+               if (*arc_flags & ARC_FLAG_PREFETCH &&
+                   refcount_is_zero(&hdr->b_l1hdr.b_refcnt))
+                       arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
+               if (*arc_flags & ARC_FLAG_L2CACHE)
+                       arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
+               if (BP_IS_AUTHENTICATED(bp))
+                       arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
+               if (BP_GET_LEVEL(bp) > 0)
+                       arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
+               if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
+                       arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
                ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
 
                acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
                acb->acb_done = done;
                acb->acb_private = private;
+               acb->acb_compressed = compressed_read;
+               acb->acb_encrypted = encrypted_read;
+               acb->acb_noauth = noauth_read;
 
-               ASSERT(hdr->b_l1hdr.b_acb == NULL);
+               ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
                hdr->b_l1hdr.b_acb = acb;
-               hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
+               arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
 
                if (HDR_HAS_L2HDR(hdr) &&
                    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
                        devw = hdr->b_l2hdr.b_dev->l2ad_writing;
                        addr = hdr->b_l2hdr.b_daddr;
-                       b_compress = HDR_GET_COMPRESS(hdr);
-                       b_asize = hdr->b_l2hdr.b_asize;
                        /*
                         * Lock out device removal.
                         */
@@ -4019,6 +6148,11 @@ top:
                                vd = NULL;
                }
 
+               if (priority == ZIO_PRIORITY_ASYNC_READ)
+                       arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
+               else
+                       arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
+
                if (hash_lock != NULL)
                        mutex_exit(hash_lock);
 
@@ -4026,9 +6160,10 @@ top:
                 * At this point, we have a level 1 cache miss.  Try again in
                 * L2ARC if possible.
                 */
-               ASSERT3U(hdr->b_size, ==, size);
+               ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
+
                DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
-                   uint64_t, size, zbookmark_phys_t *, zb);
+                   uint64_t, lsize, zbookmark_phys_t *, zb);
                ARCSTAT_BUMP(arcstat_misses);
                ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
                    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
@@ -4048,6 +6183,8 @@ top:
                            !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
                            !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
                                l2arc_read_callback_t *cb;
+                               abd_t *abd;
+                               uint64_t asize;
 
                                DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
                                ARCSTAT_BUMP(arcstat_l2_hits);
@@ -4055,15 +6192,22 @@ top:
 
                                cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
                                    KM_SLEEP);
-                               cb->l2rcb_buf = buf;
-                               cb->l2rcb_spa = spa;
+                               cb->l2rcb_hdr = hdr;
                                cb->l2rcb_bp = *bp;
                                cb->l2rcb_zb = *zb;
                                cb->l2rcb_flags = zio_flags;
-                               cb->l2rcb_compress = b_compress;
+
+                               asize = vdev_psize_to_asize(vd, size);
+                               if (asize != size) {
+                                       abd = abd_alloc_for_io(asize,
+                                           HDR_ISTYPE_METADATA(hdr));
+                                       cb->l2rcb_abd = abd;
+                               } else {
+                                       abd = hdr_abd;
+                               }
 
                                ASSERT(addr >= VDEV_LABEL_START_SIZE &&
-                                   addr + size < vd->vdev_psize -
+                                   addr + asize <= vd->vdev_psize -
                                    VDEV_LABEL_END_SIZE);
 
                                /*
@@ -4072,26 +6216,21 @@ top:
                                 * A buffer squashed to zero size by
                                 * compression (ZIO_COMPRESS_EMPTY) is never
                                 * expected here, as asserted below.
                                 */
-                               if (b_compress == ZIO_COMPRESS_EMPTY) {
-                                       rzio = zio_null(pio, spa, vd,
-                                           l2arc_read_done, cb,
-                                           zio_flags | ZIO_FLAG_DONT_CACHE |
-                                           ZIO_FLAG_CANFAIL |
-                                           ZIO_FLAG_DONT_PROPAGATE |
-                                           ZIO_FLAG_DONT_RETRY);
-                               } else {
-                                       rzio = zio_read_phys(pio, vd, addr,
-                                           b_asize, buf->b_data,
-                                           ZIO_CHECKSUM_OFF,
-                                           l2arc_read_done, cb, priority,
-                                           zio_flags | ZIO_FLAG_DONT_CACHE |
-                                           ZIO_FLAG_CANFAIL |
-                                           ZIO_FLAG_DONT_PROPAGATE |
-                                           ZIO_FLAG_DONT_RETRY, B_FALSE);
-                               }
+                               ASSERT3U(arc_hdr_get_compress(hdr), !=,
+                                   ZIO_COMPRESS_EMPTY);
+                               rzio = zio_read_phys(pio, vd, addr,
+                                   asize, abd,
+                                   ZIO_CHECKSUM_OFF,
+                                   l2arc_read_done, cb, priority,
+                                   zio_flags | ZIO_FLAG_DONT_CACHE |
+                                   ZIO_FLAG_CANFAIL |
+                                   ZIO_FLAG_DONT_PROPAGATE |
+                                   ZIO_FLAG_DONT_RETRY, B_FALSE);
+
                                DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
                                    zio_t *, rzio);
-                               ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
+                               ARCSTAT_INCR(arcstat_l2_read_bytes,
+                                   HDR_GET_PSIZE(hdr));
 
                                if (*arc_flags & ARC_FLAG_NOWAIT) {
                                        zio_nowait(rzio);
@@ -4121,8 +6260,8 @@ top:
                        }
                }
 
-               rzio = zio_read(pio, spa, bp, buf->b_data, size,
-                   arc_read_done, buf, priority, zio_flags, zb);
+               rzio = zio_read(pio, spa, bp, hdr_abd, size,
+                   arc_read_done, hdr, priority, zio_flags, zb);
 
                if (*arc_flags & ARC_FLAG_WAIT) {
                        rc = zio_wait(rzio);
@@ -4160,27 +6299,19 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
 void
 arc_remove_prune_callback(arc_prune_t *p)
 {
+       boolean_t wait = B_FALSE;
        mutex_enter(&arc_prune_mtx);
        list_remove(&arc_prune_list, p);
-       if (refcount_remove(&p->p_refcnt, &arc_prune_list) == 0) {
-               refcount_destroy(&p->p_refcnt);
-               kmem_free(p, sizeof (*p));
-       }
+       if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
+               wait = B_TRUE;
        mutex_exit(&arc_prune_mtx);
-}
-
-void
-arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
-{
-       ASSERT(buf->b_hdr != NULL);
-       ASSERT(buf->b_hdr->b_l1hdr.b_state != arc_anon);
-       ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt) ||
-           func == NULL);
-       ASSERT(buf->b_efunc == NULL);
-       ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
 
-       buf->b_efunc = func;
-       buf->b_private = private;
+       /* wait for arc_prune_task to finish */
+       if (wait)
+               taskq_wait_outstanding(arc_prune_taskq, 0);
+       ASSERT0(refcount_count(&p->p_refcnt));
+       refcount_destroy(&p->p_refcnt);
+       kmem_free(p, sizeof (*p));
 }
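/*
 * For context, a consumer of the prune interface registers a callback once
 * and later tears it down with the function above. A hypothetical sketch
 * (callback and variable names assumed, not taken from this file):
 *
 *   static void
 *   my_prune_cb(int64_t nr_to_scan, void *priv)
 *   {
 *           ... drop up to nr_to_scan references to cached metadata ...
 *   }
 *
 *   arc_prune_t *ap = arc_add_prune_callback(my_prune_cb, priv);
 *   ...
 *   arc_remove_prune_callback(ap);  (blocks until my_prune_cb has finished)
 */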
 
 /*
@@ -4198,85 +6329,38 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
        hdr = buf_hash_find(guid, bp, &hash_lock);
        if (hdr == NULL)
                return;
-       if (HDR_BUF_AVAILABLE(hdr)) {
-               arc_buf_t *buf = hdr->b_l1hdr.b_buf;
-               add_reference(hdr, hash_lock, FTAG);
-               hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
-               mutex_exit(hash_lock);
 
-               arc_release(buf, FTAG);
-               (void) arc_buf_remove_ref(buf, FTAG);
-       } else {
+       /*
+        * We might be trying to free a block that is still doing I/O
+        * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
+        * dmu_sync-ed block). If this block is being prefetched, then it
+        * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
+        * until the I/O completes. A block may also have a reference if it is
+        * part of a dedup-ed, dmu_sync-ed write. The dmu_sync() function would
+        * have written the new block to its final resting place on disk but
+        * without the dedup flag set. This would have left the hdr in the MRU
+        * state and discoverable. When the txg finally syncs it detects that
+        * the block was overridden in open context and issues an override I/O.
+        * Since this is a dedup block, the override I/O will determine if the
+        * block is already in the DDT. If so, then it will replace the io_bp
+        * with the bp from the DDT and allow the I/O to finish. When the I/O
+        * reaches the done callback, dbuf_write_override_done, it will
+        * check to see if the io_bp and io_bp_override are identical.
+        * If they are not, then it indicates that the bp was replaced with
+        * the bp in the DDT and the override bp is freed. This allows
+        * us to arrive here with a reference on a block that is being
+        * freed. So if we have an I/O in progress, or a reference to
+        * this hdr, then we don't destroy the hdr.
+        */
+       if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
+           refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
+               arc_change_state(arc_anon, hdr, hash_lock);
+               arc_hdr_destroy(hdr);
                mutex_exit(hash_lock);
-       }
-
-}
-
-/*
- * Clear the user eviction callback set by arc_set_callback(), first calling
- * it if it exists.  Because the presence of a callback keeps an arc_buf cached
- * clearing the callback may result in the arc_buf being destroyed.  However,
- * it will not result in the *last* arc_buf being destroyed, hence the data
- * will remain cached in the ARC. We make a copy of the arc buffer here so
- * that we can process the callback without holding any locks.
- *
- * It's possible that the callback is already in the process of being cleared
- * by another thread.  In this case we can not clear the callback.
- *
- * Returns B_TRUE if the callback was successfully called and cleared.
- */
-boolean_t
-arc_clear_callback(arc_buf_t *buf)
-{
-       arc_buf_hdr_t *hdr;
-       kmutex_t *hash_lock;
-       arc_evict_func_t *efunc = buf->b_efunc;
-       void *private = buf->b_private;
-
-       mutex_enter(&buf->b_evict_lock);
-       hdr = buf->b_hdr;
-       if (hdr == NULL) {
-               /*
-                * We are in arc_do_user_evicts().
-                */
-               ASSERT(buf->b_data == NULL);
-               mutex_exit(&buf->b_evict_lock);
-               return (B_FALSE);
-       } else if (buf->b_data == NULL) {
-               /*
-                * We are on the eviction list; process this buffer now
-                * but let arc_do_user_evicts() do the reaping.
-                */
-               buf->b_efunc = NULL;
-               mutex_exit(&buf->b_evict_lock);
-               VERIFY0(efunc(private));
-               return (B_TRUE);
-       }
-       hash_lock = HDR_LOCK(hdr);
-       mutex_enter(hash_lock);
-       hdr = buf->b_hdr;
-       ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
-
-       ASSERT3U(refcount_count(&hdr->b_l1hdr.b_refcnt), <,
-           hdr->b_l1hdr.b_datacnt);
-       ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
-           hdr->b_l1hdr.b_state == arc_mfu);
-
-       buf->b_efunc = NULL;
-       buf->b_private = NULL;
-
-       if (hdr->b_l1hdr.b_datacnt > 1) {
-               mutex_exit(&buf->b_evict_lock);
-               arc_buf_destroy(buf, TRUE);
        } else {
-               ASSERT(buf == hdr->b_l1hdr.b_buf);
-               hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
-               mutex_exit(&buf->b_evict_lock);
+               mutex_exit(hash_lock);
        }
 
-       mutex_exit(hash_lock);
-       VERIFY0(efunc(private));
-       return (B_TRUE);
 }
 
 /*
@@ -4312,16 +6396,19 @@ arc_release(arc_buf_t *buf, void *tag)
                ASSERT(!HDR_IO_IN_PROGRESS(hdr));
                ASSERT(!HDR_IN_HASH_TABLE(hdr));
                ASSERT(!HDR_HAS_L2HDR(hdr));
-               ASSERT(BUF_EMPTY(hdr));
+               ASSERT(HDR_EMPTY(hdr));
 
-               ASSERT3U(hdr->b_l1hdr.b_datacnt, ==, 1);
+               ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
                ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
                ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
 
-               ASSERT3P(buf->b_efunc, ==, NULL);
-               ASSERT3P(buf->b_private, ==, NULL);
-
                hdr->b_l1hdr.b_arc_access = 0;
+
+               /*
+                * If the buf is being overridden then it may already
+                * have a hdr that is not empty.
+                */
+               buf_discard_identity(hdr);
                arc_buf_thaw(buf);
 
                return;
@@ -4340,100 +6427,148 @@ arc_release(arc_buf_t *buf, void *tag)
        ASSERT3P(state, !=, arc_anon);
 
        /* this buffer is not on any list */
-       ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0);
+       ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
 
        if (HDR_HAS_L2HDR(hdr)) {
-               ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
-               ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
-
                mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
-               list_remove(&hdr->b_l2hdr.b_dev->l2ad_buflist, hdr);
 
                /*
-                * We don't want to leak the b_tmp_cdata buffer that was
-                * allocated in l2arc_write_buffers()
+                * We have to recheck this conditional again now that
+                * we're holding the l2ad_mtx to prevent a race with
+                * another thread which might be concurrently calling
+                * l2arc_evict(). In that case, l2arc_evict() might have
+                * destroyed the header's L2 portion as we were waiting
+                * to acquire the l2ad_mtx.
                 */
-               arc_buf_l2_cdata_free(hdr);
+               if (HDR_HAS_L2HDR(hdr))
+                       arc_hdr_l2hdr_destroy(hdr);
 
                mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
-
-               hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
        }
 
        /*
         * Do we have more than one buf?
         */
-       if (hdr->b_l1hdr.b_datacnt > 1) {
+       if (hdr->b_l1hdr.b_bufcnt > 1) {
                arc_buf_hdr_t *nhdr;
-               arc_buf_t **bufp;
-               uint64_t blksz = hdr->b_size;
                uint64_t spa = hdr->b_spa;
+               uint64_t psize = HDR_GET_PSIZE(hdr);
+               uint64_t lsize = HDR_GET_LSIZE(hdr);
+               boolean_t protected = HDR_PROTECTED(hdr);
+               enum zio_compress compress = arc_hdr_get_compress(hdr);
                arc_buf_contents_t type = arc_buf_type(hdr);
-               uint32_t flags = hdr->b_flags;
+               VERIFY3U(hdr->b_type, ==, type);
 
                ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
+               (void) remove_reference(hdr, hash_lock, tag);
+
+               if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
+                       ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
+                       ASSERT(ARC_BUF_LAST(buf));
+               }
+
                /*
                 * Pull the data off of this hdr and attach it to
-                * a new anonymous hdr.
+                * a new anonymous hdr. Also find the last buffer
+                * in the hdr's buffer list.
                 */
-               (void) remove_reference(hdr, hash_lock, tag);
-               bufp = &hdr->b_l1hdr.b_buf;
-               while (*bufp != buf)
-                       bufp = &(*bufp)->b_next;
-               *bufp = buf->b_next;
-               buf->b_next = NULL;
+               arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
+               ASSERT3P(lastbuf, !=, NULL);
+
+               /*
+                * If the current arc_buf_t and the hdr are sharing their data
+                * buffer, then we must stop sharing that block.
+                */
+               if (arc_buf_is_shared(buf)) {
+                       ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
+                       VERIFY(!arc_buf_is_shared(lastbuf));
+
+                       /*
+                        * First, sever the block sharing relationship between
+                        * buf and the arc_buf_hdr_t.
+                        */
+                       arc_unshare_buf(hdr, buf);
+
+                       /*
+                        * Now we need to recreate the hdr's b_pabd. Since we
+                        * have lastbuf handy, we try to share with it, but if
+                        * we can't then we allocate a new b_pabd and copy the
+                        * data from buf into it.
+                        */
+                       if (arc_can_share(hdr, lastbuf)) {
+                               arc_share_buf(hdr, lastbuf);
+                       } else {
+                               arc_hdr_alloc_abd(hdr, B_FALSE);
+                               abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
+                                   buf->b_data, psize);
+                       }
+                       VERIFY3P(lastbuf->b_data, !=, NULL);
+               } else if (HDR_SHARED_DATA(hdr)) {
+                       /*
+                        * Uncompressed shared buffers are always at the end
+                        * of the list. Compressed buffers don't have the
+                        * same requirements. This makes it hard to
+                        * simply assert that the lastbuf is shared so
+                        * we rely on the hdr's compression flags to determine
+                        * if we have a compressed, shared buffer.
+                        */
+                       ASSERT(arc_buf_is_shared(lastbuf) ||
+                           arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
+                       ASSERT(!ARC_BUF_SHARED(buf));
+               }
 
+               ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
                ASSERT3P(state, !=, arc_l2c_only);
-               ASSERT3U(state->arcs_size, >=, hdr->b_size);
-               atomic_add_64(&state->arcs_size, -hdr->b_size);
-               if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
-                       uint64_t *size;
 
+               (void) refcount_remove_many(&state->arcs_size,
+                   arc_buf_size(buf), buf);
+
+               if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
                        ASSERT3P(state, !=, arc_l2c_only);
-                       size = &state->arcs_lsize[type];
-                       ASSERT3U(*size, >=, hdr->b_size);
-                       atomic_add_64(size, -hdr->b_size);
+                       (void) refcount_remove_many(&state->arcs_esize[type],
+                           arc_buf_size(buf), buf);
                }
 
-               /*
-                * We're releasing a duplicate user data buffer, update
-                * our statistics accordingly.
-                */
-               if (HDR_ISTYPE_DATA(hdr)) {
-                       ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
-                       ARCSTAT_INCR(arcstat_duplicate_buffers_size,
-                           -hdr->b_size);
-               }
-               hdr->b_l1hdr.b_datacnt -= 1;
+               hdr->b_l1hdr.b_bufcnt -= 1;
+               if (ARC_BUF_ENCRYPTED(buf))
+                       hdr->b_crypt_hdr.b_ebufcnt -= 1;
+
                arc_cksum_verify(buf);
                arc_buf_unwatch(buf);
 
+               /* if this is the last uncompressed buf, free the checksum */
+               if (!arc_hdr_has_uncompressed_buf(hdr))
+                       arc_cksum_free(hdr);
+
                mutex_exit(hash_lock);
 
-               nhdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
-               nhdr->b_size = blksz;
-               nhdr->b_spa = spa;
+               /*
+                * Allocate a new hdr. The new hdr will contain a b_pabd
+                * buffer which will be freed in arc_write().
+                */
+               nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
+                   compress, type, HDR_HAS_RABD(hdr));
+               ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
+               ASSERT0(nhdr->b_l1hdr.b_bufcnt);
+               ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
+               VERIFY3U(nhdr->b_type, ==, type);
+               ASSERT(!HDR_SHARED_DATA(nhdr));
 
+               nhdr->b_l1hdr.b_buf = buf;
+               nhdr->b_l1hdr.b_bufcnt = 1;
+               if (ARC_BUF_ENCRYPTED(buf))
+                       nhdr->b_crypt_hdr.b_ebufcnt = 1;
                nhdr->b_l1hdr.b_mru_hits = 0;
                nhdr->b_l1hdr.b_mru_ghost_hits = 0;
                nhdr->b_l1hdr.b_mfu_hits = 0;
                nhdr->b_l1hdr.b_mfu_ghost_hits = 0;
                nhdr->b_l1hdr.b_l2_hits = 0;
-               nhdr->b_flags = flags & ARC_FLAG_L2_WRITING;
-               nhdr->b_flags |= arc_bufc_to_flags(type);
-               nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
-
-               nhdr->b_l1hdr.b_buf = buf;
-               nhdr->b_l1hdr.b_datacnt = 1;
-               nhdr->b_l1hdr.b_state = arc_anon;
-               nhdr->b_l1hdr.b_arc_access = 0;
-               nhdr->b_l1hdr.b_tmp_cdata = NULL;
-               nhdr->b_freeze_cksum = NULL;
-
                (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
                buf->b_hdr = nhdr;
+
                mutex_exit(&buf->b_evict_lock);
-               atomic_add_64(&arc_anon->arcs_size, blksz);
+               (void) refcount_add_many(&arc_anon->arcs_size,
+                   HDR_GET_LSIZE(nhdr), buf);
        } else {
                mutex_exit(&buf->b_evict_lock);
                ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
@@ -4447,13 +6582,11 @@ arc_release(arc_buf_t *buf, void *tag)
                hdr->b_l1hdr.b_l2_hits = 0;
                arc_change_state(arc_anon, hdr, hash_lock);
                hdr->b_l1hdr.b_arc_access = 0;
-               mutex_exit(hash_lock);
 
+               mutex_exit(hash_lock);
                buf_discard_identity(hdr);
                arc_buf_thaw(buf);
        }
-       buf->b_efunc = NULL;
-       buf->b_private = NULL;
 }
 
 int
@@ -4487,28 +6620,147 @@ arc_write_ready(zio_t *zio)
        arc_write_callback_t *callback = zio->io_private;
        arc_buf_t *buf = callback->awcb_buf;
        arc_buf_hdr_t *hdr = buf->b_hdr;
+       blkptr_t *bp = zio->io_bp;
+       uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
+       enum zio_compress compress;
+       fstrans_cookie_t cookie = spl_fstrans_mark();
 
        ASSERT(HDR_HAS_L1HDR(hdr));
        ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
-       ASSERT(hdr->b_l1hdr.b_datacnt > 0);
+       ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
+
+       /*
+        * If we're reexecuting this zio because the pool suspended, then
+        * clean up any state that was previously set the first time the
+        * callback was invoked.
+        */
+       if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
+               arc_cksum_free(hdr);
+               arc_buf_unwatch(buf);
+               if (hdr->b_l1hdr.b_pabd != NULL) {
+                       if (arc_buf_is_shared(buf)) {
+                               arc_unshare_buf(hdr, buf);
+                       } else {
+                               arc_hdr_free_abd(hdr, B_FALSE);
+                       }
+               }
+
+               if (HDR_HAS_RABD(hdr))
+                       arc_hdr_free_abd(hdr, B_TRUE);
+       }
+       ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+       ASSERT(!HDR_HAS_RABD(hdr));
+       ASSERT(!HDR_SHARED_DATA(hdr));
+       ASSERT(!arc_buf_is_shared(buf));
+
        callback->awcb_ready(zio, buf, callback->awcb_private);
 
+       if (HDR_IO_IN_PROGRESS(hdr))
+               ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
+
+       arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
+
+       if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
+               hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
+
+       if (BP_IS_PROTECTED(bp)) {
+               /* ZIL blocks are written through zio_rewrite */
+               ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
+               ASSERT(HDR_PROTECTED(hdr));
+
+               hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
+               hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
+               zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
+                   hdr->b_crypt_hdr.b_iv);
+               zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
+       }
+
+       /*
+        * If this block was written for raw encryption but the zio layer
+        * ended up only authenticating it, adjust the buffer flags now.
+        */
+       if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
+               arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
+               buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
+               if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
+                       buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
+       }
+
+       /* this must be done after the buffer flags are adjusted */
+       arc_cksum_compute(buf);
+
+       if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
+               compress = ZIO_COMPRESS_OFF;
+       } else {
+               ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
+               compress = BP_GET_COMPRESS(bp);
+       }
+       HDR_SET_PSIZE(hdr, psize);
+       arc_hdr_set_compress(hdr, compress);
+
+       if (zio->io_error != 0 || psize == 0)
+               goto out;
+
        /*
-        * If the IO is already in progress, then this is a re-write
-        * attempt, so we need to thaw and re-compute the cksum.
-        * It is the responsibility of the callback to handle the
-        * accounting for any re-write attempt.
+        * Fill the hdr with data. If the buffer is encrypted we have no choice
+        * but to copy the data into b_rabd. If the hdr is compressed, the data
+        * we want is available from the zio, otherwise we can take it from
+        * the buf.
+        *
+        * We might be able to share the buf's data with the hdr here. However,
+        * doing so would cause the ARC to be full of linear ABDs if we write a
+        * lot of shareable data. As a compromise, we check whether scattered
+        * ABDs are allowed, and assume that if they are then the user wants
+        * the ARC to be primarily filled with them regardless of the data being
+        * written. Therefore, if they're allowed then we allocate one and copy
+        * the data into it; otherwise, we share the data directly if we can.
         */
-       if (HDR_IO_IN_PROGRESS(hdr)) {
-               mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
-               if (hdr->b_freeze_cksum != NULL) {
-                       kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
-                       hdr->b_freeze_cksum = NULL;
+       if (ARC_BUF_ENCRYPTED(buf)) {
+               ASSERT3U(psize, >, 0);
+               ASSERT(ARC_BUF_COMPRESSED(buf));
+               arc_hdr_alloc_abd(hdr, B_TRUE);
+               abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
+       } else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
+               /*
+                * Ideally, we would always copy the io_abd into b_pabd, but the
+                * user may have disabled compressed ARC, thus we must check the
+                * hdr's compression setting rather than the io_bp's.
+                */
+               if (BP_IS_ENCRYPTED(bp)) {
+                       ASSERT3U(psize, >, 0);
+                       arc_hdr_alloc_abd(hdr, B_TRUE);
+                       abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
+               } else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
+                   !ARC_BUF_COMPRESSED(buf)) {
+                       ASSERT3U(psize, >, 0);
+                       arc_hdr_alloc_abd(hdr, B_FALSE);
+                       abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
+               } else {
+                       ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
+                       arc_hdr_alloc_abd(hdr, B_FALSE);
+                       abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
+                           arc_buf_size(buf));
                }
-               mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
+       } else {
+               ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
+               ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
+               ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
+
+               arc_share_buf(hdr, buf);
        }
-       arc_cksum_compute(buf, B_FALSE);
-       hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
+
+out:
+       arc_hdr_verify(hdr, bp);
+       spl_fstrans_unmark(cookie);
+}
+
+static void
+arc_write_children_ready(zio_t *zio)
+{
+       arc_write_callback_t *callback = zio->io_private;
+       arc_buf_t *buf = callback->awcb_buf;
+
+       callback->awcb_children_ready(zio, buf, callback->awcb_private);
 }
 
 /*
@@ -4530,9 +6782,11 @@ arc_write_done(zio_t *zio)
        arc_buf_t *buf = callback->awcb_buf;
        arc_buf_hdr_t *hdr = buf->b_hdr;
 
-       ASSERT(hdr->b_l1hdr.b_acb == NULL);
+       ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
 
        if (zio->io_error == 0) {
+               arc_hdr_verify(hdr, zio->io_bp);
+
                if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
                        buf_discard_identity(hdr);
                } else {
@@ -4540,7 +6794,7 @@ arc_write_done(zio_t *zio)
                        hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
                }
        } else {
-               ASSERT(BUF_EMPTY(hdr));
+               ASSERT(HDR_EMPTY(hdr));
        }
 
        /*
@@ -4549,11 +6803,11 @@ arc_write_done(zio_t *zio)
         * dva/birth/checksum.  The buffer must therefore remain anonymous
         * (and uncached).
         */
-       if (!BUF_EMPTY(hdr)) {
+       if (!HDR_EMPTY(hdr)) {
                arc_buf_hdr_t *exists;
                kmutex_t *hash_lock;
 
-               ASSERT(zio->io_error == 0);
+               ASSERT3U(zio->io_error, ==, 0);
 
                arc_cksum_verify(buf);
 
@@ -4583,57 +6837,114 @@ arc_write_done(zio_t *zio)
                                            (void *)hdr, (void *)exists);
                        } else {
                                /* Dedup */
-                               ASSERT(hdr->b_l1hdr.b_datacnt == 1);
+                               ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
                                ASSERT(hdr->b_l1hdr.b_state == arc_anon);
                                ASSERT(BP_GET_DEDUP(zio->io_bp));
                                ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
                        }
                }
-               hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
+               arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
                /* if it's not anon, we are doing a scrub */
                if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
                        arc_access(hdr, hash_lock);
                mutex_exit(hash_lock);
        } else {
-               hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
+               arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
        }
 
        ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
        callback->awcb_done(zio, buf, callback->awcb_private);
 
+       abd_put(zio->io_abd);
        kmem_free(callback, sizeof (arc_write_callback_t));
 }
 
 zio_t *
 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
-    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
-    const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
-    arc_done_func_t *done, void *private, zio_priority_t priority,
+    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
+    const zio_prop_t *zp, arc_write_done_func_t *ready,
+    arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
+    arc_write_done_func_t *done, void *private, zio_priority_t priority,
     int zio_flags, const zbookmark_phys_t *zb)
 {
        arc_buf_hdr_t *hdr = buf->b_hdr;
        arc_write_callback_t *callback;
        zio_t *zio;
+       zio_prop_t localprop = *zp;
 
-       ASSERT(ready != NULL);
-       ASSERT(done != NULL);
+       ASSERT3P(ready, !=, NULL);
+       ASSERT3P(done, !=, NULL);
        ASSERT(!HDR_IO_ERROR(hdr));
        ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-       ASSERT(hdr->b_l1hdr.b_acb == NULL);
-       ASSERT(hdr->b_l1hdr.b_datacnt > 0);
+       ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+       ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
        if (l2arc)
-               hdr->b_flags |= ARC_FLAG_L2CACHE;
-       if (l2arc_compress)
-               hdr->b_flags |= ARC_FLAG_L2COMPRESS;
+               arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
+
+       if (ARC_BUF_ENCRYPTED(buf)) {
+               ASSERT(ARC_BUF_COMPRESSED(buf));
+               localprop.zp_encrypt = B_TRUE;
+               localprop.zp_compress = HDR_GET_COMPRESS(hdr);
+               localprop.zp_byteorder =
+                   (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
+                   ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
+               bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
+                   ZIO_DATA_SALT_LEN);
+               bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
+                   ZIO_DATA_IV_LEN);
+               bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
+                   ZIO_DATA_MAC_LEN);
+               if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
+                       localprop.zp_nopwrite = B_FALSE;
+                       localprop.zp_copies =
+                           MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
+               }
+               zio_flags |= ZIO_FLAG_RAW;
+       } else if (ARC_BUF_COMPRESSED(buf)) {
+               ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
+               localprop.zp_compress = HDR_GET_COMPRESS(hdr);
+               zio_flags |= ZIO_FLAG_RAW_COMPRESS;
+       }
        callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
        callback->awcb_ready = ready;
+       callback->awcb_children_ready = children_ready;
        callback->awcb_physdone = physdone;
        callback->awcb_done = done;
        callback->awcb_private = private;
        callback->awcb_buf = buf;
 
-       zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
-           arc_write_ready, arc_write_physdone, arc_write_done, callback,
+       /*
+        * The hdr's b_pabd is now stale; free it now. A new data block
+        * will be allocated when the zio pipeline calls arc_write_ready().
+        */
+       if (hdr->b_l1hdr.b_pabd != NULL) {
+               /*
+                * If the buf is currently sharing the data block with
+                * the hdr then we need to break that relationship here.
+                * The hdr will remain with a NULL data pointer and the
+                * buf will take sole ownership of the block.
+                */
+               if (arc_buf_is_shared(buf)) {
+                       arc_unshare_buf(hdr, buf);
+               } else {
+                       arc_hdr_free_abd(hdr, B_FALSE);
+               }
+               VERIFY3P(buf->b_data, !=, NULL);
+       }
+
+       if (HDR_HAS_RABD(hdr))
+               arc_hdr_free_abd(hdr, B_TRUE);
+
+       arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
+
+       ASSERT(!arc_buf_is_shared(buf));
+       ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+
+       zio = zio_write(pio, spa, txg, bp,
+           abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
+           HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
+           (children_ready != NULL) ? arc_write_children_ready : NULL,
+           arc_write_physdone, arc_write_done, callback,
            priority, zio_flags, zb);
 
        return (zio);
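        /*
         * The signature above now takes a children_ready callback; a caller
         * that does not need that notification can pass NULL for it. A
         * hypothetical call (callback and argument names assumed):
         *
         *   zio = arc_write(pio, spa, txg, bp, buf, B_TRUE, &zp,
         *       my_ready_cb, NULL, my_physdone_cb, my_done_cb, cb_arg,
         *       ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, &zb);
         */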
@@ -4643,14 +6954,42 @@ static int
 arc_memory_throttle(uint64_t reserve, uint64_t txg)
 {
 #ifdef _KERNEL
-       if (zfs_arc_memory_throttle_disable)
+       uint64_t available_memory = arc_free_memory();
+       static uint64_t page_load = 0;
+       static uint64_t last_txg = 0;
+
+#if defined(_ILP32)
+       available_memory =
+           MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
+#endif
+
+       if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
                return (0);
 
-       if (freemem <= physmem * arc_lotsfree_percent / 100) {
+       if (txg > last_txg) {
+               last_txg = txg;
+               page_load = 0;
+       }
+       /*
+        * If we are in pageout, we know that memory is already tight and
+        * the ARC is already going to be evicting, so we just want to
+        * continue to let page writes occur as quickly as possible.
+        */
+       if (current_is_kswapd()) {
+               if (page_load > MAX(arc_sys_free / 4, available_memory) / 4) {
+                       DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
+                       return (SET_ERROR(ERESTART));
+               }
+               /* Note: reserve is inflated, so we deflate */
+               page_load += reserve / 8;
+               return (0);
+       } else if (page_load > 0 && arc_reclaim_needed()) {
+               /* memory is low, delay before restarting */
                ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
                DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
                return (SET_ERROR(EAGAIN));
        }
+       page_load = 0;
 #endif
        return (0);
 }
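
Restated, the throttle above has three outcomes: proceed, ERESTART when the pageout path has already written its share for this txg, or EAGAIN when memory is low and a reclaim pass is pending. A small self-contained illustration of that decision (not part of arc.c; every input is passed explicitly as a stand-in for the kernel state the real function reads, and the per-txg page_load bookkeeping is omitted):

#include <errno.h>
#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Illustration only: the decision made by arc_memory_throttle() above. */
static int
throttle_decision(uint64_t avail, uint64_t allmem, uint64_t lotsfree_pct,
    int in_pageout, uint64_t page_load, uint64_t sys_free, int reclaim_needed)
{
        if (avail > allmem * lotsfree_pct / 100)
                return (0);                     /* memory is not tight */
        if (in_pageout) {
                if (page_load > MAX(sys_free / 4, avail) / 4)
                        return (ERESTART);      /* pageout wrote enough this txg */
                return (0);                     /* keep page writes flowing */
        }
        if (page_load > 0 && reclaim_needed)
                return (EAGAIN);                /* memory is low, delay and retry */
        return (0);
}
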
@@ -4668,7 +7007,9 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
        int error;
        uint64_t anon_size;
 
-       if (reserve > arc_c/4 && !arc_no_grow)
+       if (!arc_no_grow &&
+           reserve > arc_c/4 &&
+           reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
                arc_c = MIN(arc_c_max, reserve * 4);
 
        /*
@@ -4685,7 +7026,12 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
         * network delays from blocking transactions that are ready to be
         * assigned to a txg.
         */
-       anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
+
+       /* assert that it has not wrapped around */
+       ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
+
+       anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
+           arc_loaned_bytes), 0);
 
        /*
         * Writes will, almost always, require additional memory allocations
@@ -4706,12 +7052,14 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
 
        if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
            anon_size > arc_c / 4) {
+               uint64_t meta_esize =
+                   refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+               uint64_t data_esize =
+                   refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
                dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
                    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
-                   arc_tempreserve>>10,
-                   arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
-                   arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
-                   reserve>>10, arc_c>>10);
+                   arc_tempreserve >> 10, meta_esize >> 10,
+                   data_esize >> 10, reserve >> 10, arc_c >> 10);
                DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
                return (SET_ERROR(ERESTART));
        }
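
As a worked example of the check above: with arc_c at 4 GiB, a transaction group is throttled with ERESTART only when anonymous (in-flight dirty) data exceeds arc_c/4 = 1 GiB and the combined reserve + arc_tempreserve + anon_size exceeds arc_c/2 = 2 GiB; smaller bursts of dirty data pass through untouched.
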
@@ -4723,9 +7071,11 @@ static void
 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
     kstat_named_t *evict_data, kstat_named_t *evict_metadata)
 {
-       size->value.ui64 = state->arcs_size;
-       evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA];
-       evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA];
+       size->value.ui64 = refcount_count(&state->arcs_size);
+       evict_data->value.ui64 =
+           refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
+       evict_metadata->value.ui64 =
+           refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
 }
 
 static int
@@ -4738,24 +7088,31 @@ arc_kstat_update(kstat_t *ksp, int rw)
        } else {
                arc_kstat_update_state(arc_anon,
                    &as->arcstat_anon_size,
-                   &as->arcstat_anon_evict_data,
-                   &as->arcstat_anon_evict_metadata);
+                   &as->arcstat_anon_evictable_data,
+                   &as->arcstat_anon_evictable_metadata);
                arc_kstat_update_state(arc_mru,
                    &as->arcstat_mru_size,
-                   &as->arcstat_mru_evict_data,
-                   &as->arcstat_mru_evict_metadata);
+                   &as->arcstat_mru_evictable_data,
+                   &as->arcstat_mru_evictable_metadata);
                arc_kstat_update_state(arc_mru_ghost,
                    &as->arcstat_mru_ghost_size,
-                   &as->arcstat_mru_ghost_evict_data,
-                   &as->arcstat_mru_ghost_evict_metadata);
+                   &as->arcstat_mru_ghost_evictable_data,
+                   &as->arcstat_mru_ghost_evictable_metadata);
                arc_kstat_update_state(arc_mfu,
                    &as->arcstat_mfu_size,
-                   &as->arcstat_mfu_evict_data,
-                   &as->arcstat_mfu_evict_metadata);
+                   &as->arcstat_mfu_evictable_data,
+                   &as->arcstat_mfu_evictable_metadata);
                arc_kstat_update_state(arc_mfu_ghost,
                    &as->arcstat_mfu_ghost_size,
-                   &as->arcstat_mfu_ghost_evict_data,
-                   &as->arcstat_mfu_ghost_evict_metadata);
+                   &as->arcstat_mfu_ghost_evictable_data,
+                   &as->arcstat_mfu_ghost_evictable_metadata);
+
+               as->arcstat_memory_all_bytes.value.ui64 =
+                   arc_all_memory();
+               as->arcstat_memory_free_bytes.value.ui64 =
+                   arc_free_memory();
+               as->arcstat_memory_available_bytes.value.i64 =
+                   arc_available_memory();
        }
 
        return (0);
@@ -4778,7 +7135,7 @@ arc_state_multilist_index_func(multilist_t *ml, void *obj)
         * numbers using buf_hash below. So, as an added precaution,
         * let's make sure we never add empty buffers to the arc lists.
         */
-       ASSERT(!BUF_EMPTY(hdr));
+       ASSERT(!HDR_EMPTY(hdr));
 
        /*
         * The assumption here, is the hash value for a given
@@ -4796,140 +7153,292 @@ arc_state_multilist_index_func(multilist_t *ml, void *obj)
            multilist_get_num_sublists(ml));
 }
 
+/*
+ * Called during module initialization and periodically thereafter to
+ * apply reasonable changes to the exposed performance tunings.  Non-zero
+ * zfs_* values which differ from the currently set values will be applied.
+ */
+static void
+arc_tuning_update(void)
+{
+       uint64_t allmem = arc_all_memory();
+       unsigned long limit;
+
+       /* Valid range: 64M - <all physical memory> */
+       if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
+           (zfs_arc_max > 64 << 20) && (zfs_arc_max < allmem) &&
+           (zfs_arc_max > arc_c_min)) {
+               arc_c_max = zfs_arc_max;
+               arc_c = arc_c_max;
+               arc_p = (arc_c >> 1);
+               if (arc_meta_limit > arc_c_max)
+                       arc_meta_limit = arc_c_max;
+               if (arc_dnode_limit > arc_meta_limit)
+                       arc_dnode_limit = arc_meta_limit;
+       }
+
+       /* Valid range: 32M - <arc_c_max> */
+       if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
+           (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
+           (zfs_arc_min <= arc_c_max)) {
+               arc_c_min = zfs_arc_min;
+               arc_c = MAX(arc_c, arc_c_min);
+       }
+
+       /* Valid range: 16M - <arc_c_max> */
+       if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
+           (zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
+           (zfs_arc_meta_min <= arc_c_max)) {
+               arc_meta_min = zfs_arc_meta_min;
+               if (arc_meta_limit < arc_meta_min)
+                       arc_meta_limit = arc_meta_min;
+               if (arc_dnode_limit < arc_meta_min)
+                       arc_dnode_limit = arc_meta_min;
+       }
+
+       /* Valid range: <arc_meta_min> - <arc_c_max> */
+       limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
+           MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
+       if ((limit != arc_meta_limit) &&
+           (limit >= arc_meta_min) &&
+           (limit <= arc_c_max))
+               arc_meta_limit = limit;
+
+       /* Valid range: <arc_meta_min> - <arc_meta_limit> */
+       limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
+           MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
+       if ((limit != arc_dnode_limit) &&
+           (limit >= arc_meta_min) &&
+           (limit <= arc_meta_limit))
+               arc_dnode_limit = limit;
+
+       /* Valid range: 1 - N */
+       if (zfs_arc_grow_retry)
+               arc_grow_retry = zfs_arc_grow_retry;
+
+       /* Valid range: 1 - N */
+       if (zfs_arc_shrink_shift) {
+               arc_shrink_shift = zfs_arc_shrink_shift;
+               arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift -1);
+       }
+
+       /* Valid range: 1 - N */
+       if (zfs_arc_p_min_shift)
+               arc_p_min_shift = zfs_arc_p_min_shift;
+
+       /* Valid range: 1 - N ticks */
+       if (zfs_arc_min_prefetch_lifespan)
+               arc_min_prefetch_lifespan = zfs_arc_min_prefetch_lifespan;
+
+       /* Valid range: 0 - 100 */
+       if ((zfs_arc_lotsfree_percent >= 0) &&
+           (zfs_arc_lotsfree_percent <= 100))
+               arc_lotsfree_percent = zfs_arc_lotsfree_percent;
+
+       /* Valid range: 0 - <all physical memory> */
+       if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
+               arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
+
+}
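
Every block above applies the same test: take the zfs_* module parameter only if it is non-zero, differs from the value currently in effect, and falls inside the stated range. A hypothetical helper (not in arc.c) showing that pattern in isolation:

        /* Hypothetical, for illustration: apply a tunable only when valid. */
        static void
        apply_if_valid(uint64_t zfs_val, uint64_t *cur, uint64_t lo, uint64_t hi)
        {
                if (zfs_val != 0 && zfs_val != *cur &&
                    zfs_val >= lo && zfs_val <= hi)
                        *cur = zfs_val;
        }

The zfs_arc_max block, for instance, is this check with lo = 64M and hi = all physical memory, followed by the adjustments of arc_c, arc_p, arc_meta_limit and arc_dnode_limit that keep the derived limits consistent.
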
+
+static void
+arc_state_init(void)
+{
+       arc_anon = &ARC_anon;
+       arc_mru = &ARC_mru;
+       arc_mru_ghost = &ARC_mru_ghost;
+       arc_mfu = &ARC_mfu;
+       arc_mfu_ghost = &ARC_mfu_ghost;
+       arc_l2c_only = &ARC_l2c_only;
+
+       arc_mru->arcs_list[ARC_BUFC_METADATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mru->arcs_list[ARC_BUFC_DATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mru_ghost->arcs_list[ARC_BUFC_DATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mfu->arcs_list[ARC_BUFC_METADATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mfu->arcs_list[ARC_BUFC_DATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_l2c_only->arcs_list[ARC_BUFC_METADATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+       arc_l2c_only->arcs_list[ARC_BUFC_DATA] =
+           multilist_create(sizeof (arc_buf_hdr_t),
+           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+           arc_state_multilist_index_func);
+
+       refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+       refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+       refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+       refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+       refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+       refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+
+       refcount_create(&arc_anon->arcs_size);
+       refcount_create(&arc_mru->arcs_size);
+       refcount_create(&arc_mru_ghost->arcs_size);
+       refcount_create(&arc_mfu->arcs_size);
+       refcount_create(&arc_mfu_ghost->arcs_size);
+       refcount_create(&arc_l2c_only->arcs_size);
+
+       arc_anon->arcs_state = ARC_STATE_ANON;
+       arc_mru->arcs_state = ARC_STATE_MRU;
+       arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
+       arc_mfu->arcs_state = ARC_STATE_MFU;
+       arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
+       arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
+}
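
The ten multilist_create() calls above differ only in which state and buffer type they populate. A hypothetical loop-based equivalent (not how arc.c spells it, and assuming ARC_BUFC_DATA and ARC_BUFC_METADATA are the two valid arcs_list[] indices):

        arc_state_t *cached[] = { arc_mru, arc_mru_ghost, arc_mfu,
            arc_mfu_ghost, arc_l2c_only };

        for (int i = 0; i < 5; i++) {
                for (int t = 0; t < 2; t++) {   /* DATA and METADATA lists */
                        cached[i]->arcs_list[t] = multilist_create(
                            sizeof (arc_buf_hdr_t),
                            offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
                            arc_state_multilist_index_func);
                }
        }
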
+
+static void
+arc_state_fini(void)
+{
+       refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+       refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+       refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+       refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+       refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+       refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+       refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+
+       refcount_destroy(&arc_anon->arcs_size);
+       refcount_destroy(&arc_mru->arcs_size);
+       refcount_destroy(&arc_mru_ghost->arcs_size);
+       refcount_destroy(&arc_mfu->arcs_size);
+       refcount_destroy(&arc_mfu_ghost->arcs_size);
+       refcount_destroy(&arc_l2c_only->arcs_size);
+
+       multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
+       multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
+       multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]);
+       multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
+       multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]);
+       multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
+       multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]);
+       multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
+       multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
+       multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
+}
+
+uint64_t
+arc_target_bytes(void)
+{
+       return (arc_c);
+}
+
 void
 arc_init(void)
 {
+       uint64_t percent, allmem = arc_all_memory();
+
        mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);
 
-       mutex_init(&arc_user_evicts_lock, NULL, MUTEX_DEFAULT, NULL);
-       cv_init(&arc_user_evicts_cv, NULL, CV_DEFAULT, NULL);
-
        /* Convert seconds to clock ticks */
-       zfs_arc_min_prefetch_lifespan = 1 * hz;
-
-       /* Start out with 1/8 of all memory */
-       arc_c = physmem * PAGESIZE / 8;
+       arc_min_prefetch_lifespan = 1 * hz;
 
 #ifdef _KERNEL
-       /*
-        * On architectures where the physical memory can be larger
-        * than the addressable space (intel in 32-bit mode), we may
-        * need to limit the cache to 1/8 of VM size.
-        */
-       arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
        /*
         * Register a shrinker to support synchronous (direct) memory
         * reclaim from the arc.  This is done to prevent kswapd from
         * swapping out pages when it is preferable to shrink the arc.
         */
        spl_register_shrinker(&arc_shrinker);
+
+       /* Set to 1/64 of all memory or a minimum of 512K */
+       arc_sys_free = MAX(allmem / 64, (512 * 1024));
+       arc_need_free = 0;
 #endif
 
-       /* set min cache to allow safe operation of arc_adapt() */
-       arc_c_min = 2ULL << SPA_MAXBLOCKSHIFT;
-       /* set max to 1/2 of all memory */
-       arc_c_max = arc_c * 4;
+       /* Set max to 1/2 of all memory */
+       arc_c_max = allmem / 2;
 
+#ifdef _KERNEL
+       /* Set min cache to 1/32 of all memory, or 32MB, whichever is more */
+       arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
+#else
        /*
-        * Allow the tunables to override our calculations if they are
-        * reasonable (ie. over 64MB)
+        * In userland, there's only the memory pressure that we artificially
+        * create (see arc_available_memory()).  Don't let arc_c get too
+        * small, because it can cause transactions to be larger than
+        * arc_c, causing arc_tempreserve_space() to fail.
         */
-       if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
-               arc_c_max = zfs_arc_max;
-       if (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT &&
-           zfs_arc_min <= arc_c_max)
-               arc_c_min = zfs_arc_min;
+       arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
+#endif
 
        arc_c = arc_c_max;
        arc_p = (arc_c >> 1);
-
-       /* limit meta-data to 3/4 of the arc capacity */
-       arc_meta_limit = (3 * arc_c_max) / 4;
-       arc_meta_max = 0;
-
-       /* Allow the tunable to override if it is reasonable */
-       if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
-               arc_meta_limit = zfs_arc_meta_limit;
-
-       if (zfs_arc_num_sublists_per_state < 1)
-               zfs_arc_num_sublists_per_state = num_online_cpus();
-
-       /* if kmem_flags are set, lets try to use less memory */
-       if (kmem_debugging())
-               arc_c = arc_c / 2;
-       if (arc_c < arc_c_min)
-               arc_c = arc_c_min;
-
-       arc_anon = &ARC_anon;
-       arc_mru = &ARC_mru;
-       arc_mru_ghost = &ARC_mru_ghost;
-       arc_mfu = &ARC_mfu;
-       arc_mfu_ghost = &ARC_mfu_ghost;
-       arc_l2c_only = &ARC_l2c_only;
        arc_size = 0;
 
-       multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
-       multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
-           sizeof (arc_buf_hdr_t),
-           offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
-           zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+       /* Set meta min to 1/2 of the arc_c_min floor */
+       arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
+       /* Initialize maximum observed usage to zero */
+       arc_meta_max = 0;
+       /*
+        * Set arc_meta_limit to a percent of arc_c_max with a floor of
+        * arc_meta_min, and a ceiling of arc_c_max.
+        */
+       percent = MIN(zfs_arc_meta_limit_percent, 100);
+       arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
+       percent = MIN(zfs_arc_dnode_limit_percent, 100);
+       arc_dnode_limit = (percent * arc_meta_limit) / 100;
 
-       arc_anon->arcs_state = ARC_STATE_ANON;
-       arc_mru->arcs_state = ARC_STATE_MRU;
-       arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
-       arc_mfu->arcs_state = ARC_STATE_MFU;
-       arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
-       arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
+       /* Apply user specified tunings */
+       arc_tuning_update();
+
+       /* if kmem_flags are set, lets try to use less memory */
+       if (kmem_debugging())
+               arc_c = arc_c / 2;
+       if (arc_c < arc_c_min)
+               arc_c = arc_c_min;
 
+       arc_state_init();
        buf_init();
 
-       arc_reclaim_thread_exit = FALSE;
-       arc_user_evicts_thread_exit = FALSE;
        list_create(&arc_prune_list, sizeof (arc_prune_t),
            offsetof(arc_prune_t, p_node));
-       arc_eviction_list = NULL;
        mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
-       bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
 
-       arc_prune_taskq = taskq_create("arc_prune", max_ncpus, minclsyspri,
-           max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
+       arc_prune_taskq = taskq_create("arc_prune", max_ncpus, defclsyspri,
+           max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
+
+       arc_reclaim_thread_exit = B_FALSE;
 
        arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
            sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
@@ -4940,13 +7449,10 @@ arc_init(void)
                kstat_install(arc_ksp);
        }
 
-       (void) thread_create(NULL, 0, arc_adapt_thread, NULL, 0, &p0,
-           TS_RUN, minclsyspri);
+       (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
+           TS_RUN, defclsyspri);
 
-       (void) thread_create(NULL, 0, arc_user_evicts_thread, NULL, 0, &p0,
-           TS_RUN, minclsyspri);
-
-       arc_dead = FALSE;
+       arc_dead = B_FALSE;
        arc_warm = B_FALSE;
 
        /*
@@ -4955,14 +7461,14 @@ arc_init(void)
         * If it has been set by a module parameter, take that.
         * Otherwise, use a percentage of physical memory defined by
         * zfs_dirty_data_max_percent (default 10%) with a cap at
-        * zfs_dirty_data_max_max (default 25% of physical memory).
+        * zfs_dirty_data_max_max (default 4G or 25% of physical memory).
         */
        if (zfs_dirty_data_max_max == 0)
-               zfs_dirty_data_max_max = physmem * PAGESIZE *
-                   zfs_dirty_data_max_max_percent / 100;
+               zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
+                   allmem * zfs_dirty_data_max_max_percent / 100);
 
        if (zfs_dirty_data_max == 0) {
-               zfs_dirty_data_max = physmem * PAGESIZE *
+               zfs_dirty_data_max = allmem *
                    zfs_dirty_data_max_percent / 100;
                zfs_dirty_data_max = MIN(zfs_dirty_data_max,
                    zfs_dirty_data_max_max);
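
With the defaults described in the comment above (10% for zfs_dirty_data_max_percent, 25% with a 4 GiB cap for zfs_dirty_data_max_max), a 32 GiB machine ends up with zfs_dirty_data_max_max = MIN(4 GiB, 8 GiB) = 4 GiB and zfs_dirty_data_max = MIN(3.2 GiB, 4 GiB) = 3.2 GiB; on a 128 GiB machine the 4 GiB cap is what limits zfs_dirty_data_max.
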
@@ -4979,10 +7485,10 @@ arc_fini(void)
 #endif /* _KERNEL */
 
        mutex_enter(&arc_reclaim_lock);
-       arc_reclaim_thread_exit = TRUE;
+       arc_reclaim_thread_exit = B_TRUE;
        /*
         * The reclaim thread will set arc_reclaim_thread_exit back to
-        * FALSE when it is finished exiting; we're waiting for that.
+        * B_FALSE when it is finished exiting; we're waiting for that.
         */
        while (arc_reclaim_thread_exit) {
                cv_signal(&arc_reclaim_thread_cv);
@@ -4990,22 +7496,10 @@ arc_fini(void)
        }
        mutex_exit(&arc_reclaim_lock);
 
-       mutex_enter(&arc_user_evicts_lock);
-       arc_user_evicts_thread_exit = TRUE;
-       /*
-        * The user evicts thread will set arc_user_evicts_thread_exit
-        * to FALSE when it is finished exiting; we're waiting for that.
-        */
-       while (arc_user_evicts_thread_exit) {
-               cv_signal(&arc_user_evicts_cv);
-               cv_wait(&arc_user_evicts_cv, &arc_user_evicts_lock);
-       }
-       mutex_exit(&arc_user_evicts_lock);
-
-       /* Use TRUE to ensure *all* buffers are evicted */
-       arc_flush(NULL, TRUE);
+       /* Use B_TRUE to ensure *all* buffers are evicted */
+       arc_flush(NULL, B_TRUE);
 
-       arc_dead = TRUE;
+       arc_dead = B_TRUE;
 
        if (arc_ksp != NULL) {
                kstat_delete(arc_ksp);
@@ -5030,20 +7524,7 @@ arc_fini(void)
        cv_destroy(&arc_reclaim_thread_cv);
        cv_destroy(&arc_reclaim_waiters_cv);
 
-       mutex_destroy(&arc_user_evicts_lock);
-       cv_destroy(&arc_user_evicts_cv);
-
-       multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
-       multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
-       multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
-       multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
-       multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
-       multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
-       multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
-       multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
-       multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
-       multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
-
+       arc_state_fini();
        buf_fini();
 
        ASSERT0(arc_loaned_bytes);
@@ -5173,7 +7654,6 @@ arc_fini(void)
  *     l2arc_write_max         max write bytes per interval
  *     l2arc_write_boost       extra write bytes during device warmup
  *     l2arc_noprefetch        skip caching prefetched buffers
- *     l2arc_nocompress        skip compressing buffers
  *     l2arc_headroom          number of max device writes to precache
  *     l2arc_headroom_boost    when we find compressed buffers during ARC
  *                             scanning, we multiply headroom by this
@@ -5333,9 +7813,8 @@ l2arc_do_free_on_write(void)
 
        for (df = list_tail(buflist); df; df = df_prev) {
                df_prev = list_prev(buflist, df);
-               ASSERT(df->l2df_data != NULL);
-               ASSERT(df->l2df_func != NULL);
-               df->l2df_func(df->l2df_data, df->l2df_size);
+               ASSERT3P(df->l2df_abd, !=, NULL);
+               abd_free(df->l2df_abd);
                list_remove(buflist, df);
                kmem_free(df, sizeof (l2arc_data_free_t));
        }
@@ -5358,13 +7837,13 @@ l2arc_write_done(zio_t *zio)
        int64_t bytes_dropped = 0;
 
        cb = zio->io_private;
-       ASSERT(cb != NULL);
+       ASSERT3P(cb, !=, NULL);
        dev = cb->l2wcb_dev;
-       ASSERT(dev != NULL);
+       ASSERT3P(dev, !=, NULL);
        head = cb->l2wcb_head;
-       ASSERT(head != NULL);
+       ASSERT3P(head, !=, NULL);
        buflist = &dev->l2ad_buflist;
-       ASSERT(buflist != NULL);
+       ASSERT3P(buflist, !=, NULL);
        DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
            l2arc_write_callback_t *, cb);
 
@@ -5423,27 +7902,29 @@ top:
                ASSERT(HDR_HAS_L1HDR(hdr));
 
                /*
-                * We may have allocated a buffer for L2ARC compression,
-                * we must release it to avoid leaking this data.
+                * Skipped - drop L2ARC entry and mark the header as no
+                * longer L2 eligible.
                 */
-               l2arc_release_cdata_buf(hdr);
-
                if (zio->io_error != 0) {
                        /*
                         * Error - drop L2ARC entry.
                         */
                        list_remove(buflist, hdr);
-                       hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
+                       arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
 
-                       ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
-                       ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
+                       ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr));
+                       ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
+
+                       bytes_dropped += arc_hdr_size(hdr);
+                       (void) refcount_remove_many(&dev->l2ad_alloc,
+                           arc_hdr_size(hdr), hdr);
                }
 
                /*
                 * Allow ARC to begin reads and ghost list evictions to
                 * this L2ARC entry.
                 */
-               hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
+               arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
 
                mutex_exit(hash_lock);
        }
@@ -5461,6 +7942,102 @@ top:
        kmem_free(cb, sizeof (l2arc_write_callback_t));
 }
 
+static int
+l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
+{
+       int ret;
+       spa_t *spa = zio->io_spa;
+       arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
+       blkptr_t *bp = zio->io_bp;
+       dsl_crypto_key_t *dck = NULL;
+       uint8_t salt[ZIO_DATA_SALT_LEN];
+       uint8_t iv[ZIO_DATA_IV_LEN];
+       uint8_t mac[ZIO_DATA_MAC_LEN];
+       boolean_t no_crypt = B_FALSE;
+
+       /*
+        * ZIL data is never written to the L2ARC, so we don't need
+        * special handling for its unique MAC storage.
+        */
+       ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
+       ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
+
+       /* If the data was encrypted, decrypt it now */
+       if (HDR_ENCRYPTED(hdr)) {
+               abd_t *eabd = arc_get_data_abd(hdr,
+                   arc_hdr_size(hdr), hdr);
+
+               zio_crypt_decode_params_bp(bp, salt, iv);
+               zio_crypt_decode_mac_bp(bp, mac);
+
+               ret = spa_keystore_lookup_key(spa,
+                   cb->l2rcb_zb.zb_objset, FTAG, &dck);
+               if (ret != 0) {
+                       arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
+                       goto error;
+               }
+
+               ret = zio_do_crypt_abd(B_FALSE, &dck->dck_key,
+                   salt, BP_GET_TYPE(bp), iv, mac, HDR_GET_PSIZE(hdr),
+                   BP_SHOULD_BYTESWAP(bp), eabd, hdr->b_l1hdr.b_pabd,
+                   &no_crypt);
+               if (ret != 0) {
+                       arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
+                       spa_keystore_dsl_key_rele(spa, dck, FTAG);
+                       goto error;
+               }
+
+               spa_keystore_dsl_key_rele(spa, dck, FTAG);
+
+               /*
+                * If we actually performed decryption, replace b_pabd
+                * with the decrypted data. Otherwise we can just throw
+                * our decryption buffer away.
+                */
+               if (!no_crypt) {
+                       arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
+                           arc_hdr_size(hdr), hdr);
+                       hdr->b_l1hdr.b_pabd = eabd;
+                       zio->io_abd = eabd;
+               } else {
+                       arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
+               }
+       }
+
+       /*
+        * If the L2ARC block was compressed, but ARC compression
+        * is disabled we decompress the data into a new buffer and
+        * replace the existing data.
+        */
+       if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
+           !HDR_COMPRESSION_ENABLED(hdr)) {
+               abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
+               void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
+
+               ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
+                   hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
+                   HDR_GET_LSIZE(hdr));
+               if (ret != 0) {
+                       abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
+                       arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
+                       goto error;
+               }
+
+               abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
+               arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
+                   arc_hdr_size(hdr), hdr);
+               hdr->b_l1hdr.b_pabd = cabd;
+               zio->io_abd = cabd;
+               zio->io_size = HDR_GET_LSIZE(hdr);
+       }
+
+       return (0);
+
+error:
+       return (ret);
+}
+
+
 /*
  * A read to a cache device completed.  Validate buffer contents before
  * handing over to the regular ARC routines.
@@ -5468,43 +8045,79 @@ top:
 static void
 l2arc_read_done(zio_t *zio)
 {
+       int tfm_error = 0;
        l2arc_read_callback_t *cb;
        arc_buf_hdr_t *hdr;
-       arc_buf_t *buf;
        kmutex_t *hash_lock;
-       int equal;
+       boolean_t valid_cksum, using_rdata;
 
-       ASSERT(zio->io_vd != NULL);
+       ASSERT3P(zio->io_vd, !=, NULL);
        ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
 
        spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
 
        cb = zio->io_private;
-       ASSERT(cb != NULL);
-       buf = cb->l2rcb_buf;
-       ASSERT(buf != NULL);
+       ASSERT3P(cb, !=, NULL);
+       hdr = cb->l2rcb_hdr;
+       ASSERT3P(hdr, !=, NULL);
 
-       hash_lock = HDR_LOCK(buf->b_hdr);
+       hash_lock = HDR_LOCK(hdr);
        mutex_enter(hash_lock);
-       hdr = buf->b_hdr;
        ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
 
        /*
-        * If the buffer was compressed, decompress it first.
+        * If the data was read into a temporary buffer,
+        * move it and free the buffer.
         */
-       if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
-               l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
-       ASSERT(zio->io_data != NULL);
+       if (cb->l2rcb_abd != NULL) {
+               ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
+               if (zio->io_error == 0) {
+                       abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd,
+                           arc_hdr_size(hdr));
+               }
+
+               /*
+                * The following must be done regardless of whether
+                * there was an error:
+                * - free the temporary buffer
+                * - point zio to the real ARC buffer
+                * - set zio size accordingly
+                * These are required because the zio is either re-used to
+                * issue an I/O for the block in the error case, or passed
+                * to arc_read_done(), which needs the real data.
+                */
+               abd_free(cb->l2rcb_abd);
+               zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
+               zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
+       }
+
+       ASSERT3P(zio->io_abd, !=, NULL);
 
        /*
         * Check this survived the L2ARC journey.
         */
-       equal = arc_cksum_equal(buf);
-       if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
+       ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
+           (HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
+       zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
+       zio->io_bp = &zio->io_bp_copy;  /* XXX fix in L2ARC 2.0 */
+
+       valid_cksum = arc_cksum_is_equal(hdr, zio);
+       using_rdata = (HDR_HAS_RABD(hdr) &&
+           zio->io_abd == hdr->b_crypt_hdr.b_rabd);
+
+       /*
+        * b_rabd will always match the data as it exists on disk if it is
+        * being used. Therefore if we are reading into b_rabd we do not
+        * attempt to untransform the data.
+        */
+       if (valid_cksum && !using_rdata)
+               tfm_error = l2arc_untransform(zio, cb);
+
+       if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
+           !HDR_L2_EVICTED(hdr)) {
                mutex_exit(hash_lock);
-               zio->io_private = buf;
-               zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
-               zio->io_bp = &zio->io_bp_copy;  /* XXX fix in L2ARC 2.0 */
+               zio->io_private = hdr;
                arc_read_done(zio);
        } else {
                mutex_exit(hash_lock);
@@ -5517,7 +8130,7 @@ l2arc_read_done(zio_t *zio)
                } else {
                        zio->io_error = SET_ERROR(EIO);
                }
-               if (!equal)
+               if (!valid_cksum || tfm_error != 0)
                        ARCSTAT_BUMP(arcstat_l2_cksum_bad);
 
                /*
@@ -5527,12 +8140,15 @@ l2arc_read_done(zio_t *zio)
                 */
                if (zio->io_waiter == NULL) {
                        zio_t *pio = zio_unique_parent(zio);
+                       void *abd = (using_rdata) ?
+                           hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
 
                        ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
 
-                       zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
-                           buf->b_data, zio->io_size, arc_read_done, buf,
-                           zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
+                       zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
+                           abd, zio->io_size, arc_read_done,
+                           hdr, zio->io_priority, cb->l2rcb_flags,
+                           &cb->l2rcb_zb));
                }
        }
 
@@ -5555,21 +8171,23 @@ l2arc_sublist_lock(int list_num)
        multilist_t *ml = NULL;
        unsigned int idx;
 
-       ASSERT(list_num >= 0 && list_num <= 3);
+       ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
 
        switch (list_num) {
        case 0:
-               ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
+               ml = arc_mfu->arcs_list[ARC_BUFC_METADATA];
                break;
        case 1:
-               ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
+               ml = arc_mru->arcs_list[ARC_BUFC_METADATA];
                break;
        case 2:
-               ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
+               ml = arc_mfu->arcs_list[ARC_BUFC_DATA];
                break;
        case 3:
-               ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
+               ml = arc_mru->arcs_list[ARC_BUFC_DATA];
                break;
+       default:
+               return (NULL);
        }
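
The case order above is also the L2ARC feed priority: MFU metadata first, then MRU metadata, then MFU data, then MRU data, so hot metadata is the first thing pushed to a cache device on each feed cycle.
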
 
        /*
@@ -5595,7 +8213,6 @@ l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
        arc_buf_hdr_t *hdr, *hdr_prev;
        kmutex_t *hash_lock;
        uint64_t taddr;
-       int64_t bytes_evicted = 0;
 
        buflist = &dev->l2ad_buflist;
 
@@ -5642,18 +8259,16 @@ top:
                        goto top;
                }
 
-               if (HDR_L2_WRITE_HEAD(hdr)) {
-                       /*
-                        * We hit a write head node.  Leave it for
-                        * l2arc_write_done().
-                        */
-                       list_remove(buflist, hdr);
-                       mutex_exit(hash_lock);
-                       continue;
-               }
+               /*
+                * A header can't be on this list if it lacks an L2 header.
+                */
+               ASSERT(HDR_HAS_L2HDR(hdr));
 
-               if (!all && HDR_HAS_L2HDR(hdr) &&
-                   (hdr->b_l2hdr.b_daddr > taddr ||
+               /* Ensure this header has finished being written. */
+               ASSERT(!HDR_L2_WRITING(hdr));
+               ASSERT(!HDR_L2_WRITE_HEAD(hdr));
+
+               if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
                    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
                        /*
                         * We've evicted to the target address,
@@ -5663,13 +8278,12 @@ top:
                        break;
                }
 
-               ASSERT(HDR_HAS_L2HDR(hdr));
                if (!HDR_HAS_L1HDR(hdr)) {
                        ASSERT(!HDR_L2_READING(hdr));
                        /*
                         * This doesn't exist in the ARC.  Destroy.
                         * arc_hdr_destroy() will call list_remove()
-                        * and decrement arcstat_l2_size.
+                        * and decrement arcstat_l2_lsize.
                         */
                        arc_change_state(arc_anon, hdr, hash_lock);
                        arc_hdr_destroy(hdr);
@@ -5683,28 +8297,131 @@ top:
                         */
                        if (HDR_L2_READING(hdr)) {
                                ARCSTAT_BUMP(arcstat_l2_evict_reading);
-                               hdr->b_flags |= ARC_FLAG_L2_EVICTED;
+                               arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
                        }
 
-                       /*
-                        * Tell ARC this no longer exists in L2ARC.
-                        */
-                       ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
-                       ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
-                       hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
-                       list_remove(buflist, hdr);
-
-                       /* Ensure this header has finished being written */
-                       ASSERT(!HDR_L2_WRITING(hdr));
-                       ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
+                       arc_hdr_l2hdr_destroy(hdr);
                }
                mutex_exit(hash_lock);
        }
        mutex_exit(&dev->l2ad_mtx);
+}
+
+/*
+ * Handle any abd transforms that might be required for writing to the L2ARC.
+ * If successful, this function will always return an abd with the data
+ * transformed as it is on disk in a new abd of asize bytes.
+ */
+static int
+l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
+    abd_t **abd_out)
+{
+       int ret;
+       void *tmp = NULL;
+       abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
+       enum zio_compress compress = HDR_GET_COMPRESS(hdr);
+       uint64_t psize = HDR_GET_PSIZE(hdr);
+       uint64_t size = arc_hdr_size(hdr);
+       boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
+       boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
+       dsl_crypto_key_t *dck = NULL;
+       uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
+       boolean_t no_crypt = B_FALSE;
+
+       ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
+           !HDR_COMPRESSION_ENABLED(hdr)) ||
+           HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
+       ASSERT3U(psize, <=, asize);
+
+       /*
+        * If this data simply needs its own buffer, we allocate it and
+        * copy the data. This may be done to eliminate a dependency on a
+        * shared buffer or to reallocate the buffer to match asize.
+        */
+       if (HDR_HAS_RABD(hdr) && asize != psize) {
+               ASSERT3U(size, ==, psize);
+               to_write = abd_alloc_for_io(asize, ismd);
+               abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, size);
+               if (size != asize)
+                       abd_zero_off(to_write, size, asize - size);
+               goto out;
+       }
+
+       if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
+           !HDR_ENCRYPTED(hdr)) {
+               ASSERT3U(size, ==, psize);
+               to_write = abd_alloc_for_io(asize, ismd);
+               abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
+               if (size != asize)
+                       abd_zero_off(to_write, size, asize - size);
+               goto out;
+       }
+
+       if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
+               cabd = abd_alloc_for_io(asize, ismd);
+               tmp = abd_borrow_buf(cabd, asize);
+
+               psize = zio_compress_data(compress, to_write, tmp, size);
+               ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
+               if (psize < asize)
+                       bzero((char *)tmp + psize, asize - psize);
+               psize = HDR_GET_PSIZE(hdr);
+               abd_return_buf_copy(cabd, tmp, asize);
+               to_write = cabd;
+       }
+
+       if (HDR_ENCRYPTED(hdr)) {
+               eabd = abd_alloc_for_io(asize, ismd);
+
+               /*
+                * If the dataset was disowned before the buffer
+                * made it to this point, the key to re-encrypt
+                * it won't be available. In this case we simply
+                * won't write the buffer to the L2ARC.
+                */
+               ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
+                   FTAG, &dck);
+               if (ret != 0)
+                       goto error;
+
+               ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
+                   hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_ot,
+                   hdr->b_crypt_hdr.b_iv, mac, psize, bswap, to_write,
+                   eabd, &no_crypt);
+               if (ret != 0)
+                       goto error;
+
+               if (no_crypt)
+                       abd_copy(eabd, to_write, psize);
+
+               if (psize != asize)
+                       abd_zero_off(eabd, psize, asize - psize);
+
+               /* assert that the MAC we got here matches the one we saved */
+               ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
+               spa_keystore_dsl_key_rele(spa, dck, FTAG);
+
+               if (to_write == cabd)
+                       abd_free(cabd);
+
+               to_write = eabd;
+       }
 
-       vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
-       dev->l2ad_evict = taddr;
+out:
+       ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
+       *abd_out = to_write;
+       return (0);
+
+error:
+       if (dck != NULL)
+               spa_keystore_dsl_key_rele(spa, dck, FTAG);
+       if (cabd != NULL)
+               abd_free(cabd);
+       if (eabd != NULL)
+               abd_free(eabd);
+
+       *abd_out = NULL;
+       return (ret);
 }
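
As a concrete example of the sizing above: a block with psize = 5632 bytes destined for a cache device whose ashift is 12 gets asize = 8192 bytes from vdev_psize_to_asize(), so the output abd is allocated at 8 KiB and the trailing 2560 bytes are zero-filled; on the read side, l2arc_untransform() undoes these transforms in reverse order, decrypting first and then decompressing when ARC compression is disabled.
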
 
 /*
@@ -5719,45 +8436,33 @@ top:
  * the delta by which the device hand has changed due to alignment).
  */
 static uint64_t
-l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
-    boolean_t *headroom_boost)
+l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
 {
        arc_buf_hdr_t *hdr, *hdr_prev, *head;
-       uint64_t write_asize, write_psize, write_sz, headroom,
-           buf_compress_minsz;
-       void *buf_data;
+       uint64_t write_asize, write_psize, write_lsize, headroom;
        boolean_t full;
        l2arc_write_callback_t *cb;
        zio_t *pio, *wzio;
        uint64_t guid = spa_load_guid(spa);
        int try;
-       const boolean_t do_headroom_boost = *headroom_boost;
 
-       ASSERT(dev->l2ad_vdev != NULL);
-
-       /* Lower the flag now, we might want to raise it again later. */
-       *headroom_boost = B_FALSE;
+       ASSERT3P(dev->l2ad_vdev, !=, NULL);
 
        pio = NULL;
-       write_sz = write_asize = write_psize = 0;
+       write_lsize = write_asize = write_psize = 0;
        full = B_FALSE;
        head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
-       head->b_flags |= ARC_FLAG_L2_WRITE_HEAD;
-       head->b_flags |= ARC_FLAG_HAS_L2HDR;
-
-       /*
-        * We will want to try to compress buffers that are at least 2x the
-        * device sector size.
-        */
-       buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
+       arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
 
        /*
         * Copy buffers for L2ARC writing.
         */
-       for (try = 0; try <= 3; try++) {
+       for (try = 0; try < L2ARC_FEED_TYPES; try++) {
                multilist_sublist_t *mls = l2arc_sublist_lock(try);
                uint64_t passed_sz = 0;
 
+               VERIFY3P(mls, !=, NULL);
+
                /*
                 * L2ARC fast warmup.
                 *
@@ -5770,12 +8475,12 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
                        hdr = multilist_sublist_tail(mls);
 
                headroom = target_sz * l2arc_headroom;
-               if (do_headroom_boost)
+               if (zfs_compressed_arc_enabled)
                        headroom = (headroom * l2arc_headroom_boost) / 100;
 
                for (; hdr; hdr = hdr_prev) {
                        kmutex_t *hash_lock;
-                       uint64_t buf_sz;
+                       abd_t *to_write = NULL;
 
                        if (arc_warm == B_FALSE)
                                hdr_prev = multilist_sublist_next(mls, hdr);
@@ -5790,7 +8495,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
                                continue;
                        }
 
-                       passed_sz += hdr->b_size;
+                       passed_sz += HDR_GET_LSIZE(hdr);
                        if (passed_sz > headroom) {
                                /*
                                 * Searched too far.
@@ -5804,12 +8509,79 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
                                continue;
                        }
 
-                       if ((write_sz + hdr->b_size) > target_sz) {
+                       /*
+                        * We rely on the L1 portion of the header below, so
+                        * it's invalid for this header to have been evicted out
+                        * of the ghost cache, prior to being written out. The
+                        * ARC_FLAG_L2_WRITING bit ensures this won't happen.
+                        */
+                       ASSERT(HDR_HAS_L1HDR(hdr));
+
+                       ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
+                       ASSERT3U(arc_hdr_size(hdr), >, 0);
+                       ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
+                           HDR_HAS_RABD(hdr));
+                       uint64_t psize = HDR_GET_PSIZE(hdr);
+                       uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
+                           psize);
+
+                       if ((write_asize + asize) > target_sz) {
                                full = B_TRUE;
                                mutex_exit(hash_lock);
                                break;
                        }
 
+                       /*
+                        * We rely on the L1 portion of the header below, so
+                        * it's invalid for this header to have been evicted out
+                        * of the ghost cache, prior to being written out. The
+                        * ARC_FLAG_L2_WRITING bit ensures this won't happen.
+                        */
+                       arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
+                       ASSERT(HDR_HAS_L1HDR(hdr));
+
+                       ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
+                       ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
+                           HDR_HAS_RABD(hdr));
+                       ASSERT3U(arc_hdr_size(hdr), >, 0);
+
+                       /*
+                        * If this header has b_rabd, we can use this since it
+                        * must always match the data exactly as it exists on
+                        * disk. Otherwise, the L2ARC can normally use the
+                        * hdr's data, but if we're sharing data between the
+                        * hdr and one of its bufs, L2ARC needs its own copy of
+                        * the data so that the ZIO below can't race with the
+                        * buf consumer. To ensure that this copy will be
+                        * available for the lifetime of the ZIO and be cleaned
+                        * up afterwards, we add it to the l2arc_free_on_write
+                        * queue. If we need to apply any transforms to the
+                        * data (compression, encryption) we will also need the
+                        * extra buffer.
+                        */
+                       if (HDR_HAS_RABD(hdr) && psize == asize) {
+                               to_write = hdr->b_crypt_hdr.b_rabd;
+                       } else if ((HDR_COMPRESSION_ENABLED(hdr) ||
+                           HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
+                           !HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
+                           psize == asize) {
+                               to_write = hdr->b_l1hdr.b_pabd;
+                       } else {
+                               int ret;
+                               arc_buf_contents_t type = arc_buf_type(hdr);
+
+                               ret = l2arc_apply_transforms(spa, hdr, asize,
+                                   &to_write);
+                               if (ret != 0) {
+                                       arc_hdr_clear_flags(hdr,
+                                           ARC_FLAG_L2_WRITING);
+                                       mutex_exit(hash_lock);
+                                       continue;
+                               }
+
+                               l2arc_free_abd_on_write(to_write, asize, type);
+                       }
+
                        if (pio == NULL) {
                                /*
                                 * Insert a dummy header on the buflist so
@@ -5820,50 +8592,44 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
                                list_insert_head(&dev->l2ad_buflist, head);
                                mutex_exit(&dev->l2ad_mtx);
 
-                               cb = kmem_alloc(sizeof (l2arc_write_callback_t),
-                                   KM_SLEEP);
+                               cb = kmem_alloc(
+                                   sizeof (l2arc_write_callback_t), KM_SLEEP);
                                cb->l2wcb_dev = dev;
                                cb->l2wcb_head = head;
                                pio = zio_root(spa, l2arc_write_done, cb,
                                    ZIO_FLAG_CANFAIL);
                        }
 
-                       /*
-                        * Create and add a new L2ARC header.
-                        */
                        hdr->b_l2hdr.b_dev = dev;
-                       arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
-                       hdr->b_flags |= ARC_FLAG_L2_WRITING;
-                       /*
-                        * Temporarily stash the data buffer in b_tmp_cdata.
-                        * The subsequent write step will pick it up from
-                        * there. This is because can't access b_l1hdr.b_buf
-                        * without holding the hash_lock, which we in turn
-                        * can't access without holding the ARC list locks
-                        * (which we want to avoid during compression/writing)
-                        */
-                       HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
-                       hdr->b_l2hdr.b_asize = hdr->b_size;
                        hdr->b_l2hdr.b_hits = 0;
-                       hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data;
 
-                       buf_sz = hdr->b_size;
-                       hdr->b_flags |= ARC_FLAG_HAS_L2HDR;
+                       hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
+                       arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
 
                        mutex_enter(&dev->l2ad_mtx);
                        list_insert_head(&dev->l2ad_buflist, hdr);
                        mutex_exit(&dev->l2ad_mtx);
 
-                       /*
-                        * Compute and store the buffer cksum before
-                        * writing.  On debug the cksum is verified first.
-                        */
-                       arc_cksum_verify(hdr->b_l1hdr.b_buf);
-                       arc_cksum_compute(hdr->b_l1hdr.b_buf, B_TRUE);
+                       (void) refcount_add_many(&dev->l2ad_alloc,
+                           arc_hdr_size(hdr), hdr);
+
+                       wzio = zio_write_phys(pio, dev->l2ad_vdev,
+                           hdr->b_l2hdr.b_daddr, asize, to_write,
+                           ZIO_CHECKSUM_OFF, NULL, hdr,
+                           ZIO_PRIORITY_ASYNC_WRITE,
+                           ZIO_FLAG_CANFAIL, B_FALSE);
+
+                       write_lsize += HDR_GET_LSIZE(hdr);
+                       DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
+                           zio_t *, wzio);
+
+                       write_psize += psize;
+                       write_asize += asize;
+                       dev->l2ad_hand += asize;
 
                        mutex_exit(hash_lock);
 
-                       write_sz += buf_sz;
+                       (void) zio_nowait(wzio);
                }
 
                multilist_sublist_unlock(mls);
@@ -5874,89 +8640,18 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
 
        /* No buffers selected for writing? */
        if (pio == NULL) {
-               ASSERT0(write_sz);
+               ASSERT0(write_lsize);
                ASSERT(!HDR_HAS_L1HDR(head));
                kmem_cache_free(hdr_l2only_cache, head);
                return (0);
        }
 
-       mutex_enter(&dev->l2ad_mtx);
-
-       /*
-        * Now start writing the buffers. We're starting at the write head
-        * and work backwards, retracing the course of the buffer selector
-        * loop above.
-        */
-       for (hdr = list_prev(&dev->l2ad_buflist, head); hdr;
-           hdr = list_prev(&dev->l2ad_buflist, hdr)) {
-               uint64_t buf_sz;
-
-               /*
-                * We rely on the L1 portion of the header below, so
-                * it's invalid for this header to have been evicted out
-                * of the ghost cache, prior to being written out. The
-                * ARC_FLAG_L2_WRITING bit ensures this won't happen.
-                */
-               ASSERT(HDR_HAS_L1HDR(hdr));
-
-               /*
-                * We shouldn't need to lock the buffer here, since we flagged
-                * it as ARC_FLAG_L2_WRITING in the previous step, but we must
-                * take care to only access its L2 cache parameters. In
-                * particular, hdr->l1hdr.b_buf may be invalid by now due to
-                * ARC eviction.
-                */
-               hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
-
-               if ((!l2arc_nocompress && HDR_L2COMPRESS(hdr)) &&
-                   hdr->b_l2hdr.b_asize >= buf_compress_minsz) {
-                       if (l2arc_compress_buf(hdr)) {
-                               /*
-                                * If compression succeeded, enable headroom
-                                * boost on the next scan cycle.
-                                */
-                               *headroom_boost = B_TRUE;
-                       }
-               }
-
-               /*
-                * Pick up the buffer data we had previously stashed away
-                * (and now potentially also compressed).
-                */
-               buf_data = hdr->b_l1hdr.b_tmp_cdata;
-               buf_sz = hdr->b_l2hdr.b_asize;
-
-               /* Compression may have squashed the buffer to zero length. */
-               if (buf_sz != 0) {
-                       uint64_t buf_p_sz;
-
-                       wzio = zio_write_phys(pio, dev->l2ad_vdev,
-                           dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
-                           NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
-                           ZIO_FLAG_CANFAIL, B_FALSE);
-
-                       DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
-                           zio_t *, wzio);
-                       (void) zio_nowait(wzio);
-
-                       write_asize += buf_sz;
-                       /*
-                        * Keep the clock hand suitably device-aligned.
-                        */
-                       buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
-                       write_psize += buf_p_sz;
-                       dev->l2ad_hand += buf_p_sz;
-               }
-       }
-
-       mutex_exit(&dev->l2ad_mtx);
-
        ASSERT3U(write_asize, <=, target_sz);
        ARCSTAT_BUMP(arcstat_l2_writes_sent);
-       ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
-       ARCSTAT_INCR(arcstat_l2_size, write_sz);
-       ARCSTAT_INCR(arcstat_l2_asize, write_asize);
-       vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);
+       ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
+       ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
+       ARCSTAT_INCR(arcstat_l2_psize, write_psize);
+       vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
 
        /*
         * Bump device hand to the device start if it is approaching the end.
@@ -5964,7 +8659,6 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
         */
        if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
                dev->l2ad_hand = dev->l2ad_start;
-               dev->l2ad_evict = dev->l2ad_start;
                dev->l2ad_first = B_FALSE;
        }
 
@@ -5975,197 +8669,18 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
        return (write_asize);
 }
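The rewritten loop above keeps three running totals per write pass: write_lsize (logical, uncompressed bytes), write_psize (physical bytes as stored, possibly compressed) and write_asize (bytes actually allocated on the cache device, i.e. psize rounded up to the vdev's allocation unit), and the device hand advances by asize. Here is a minimal sketch of that last rounding step, assuming a power-of-two allocation unit of 1 << ashift; round_up_asize() is illustrative and not the vdev_psize_to_asize() implementation.

/*
 * Illustrative sketch only: derive an allocated size from a physical size
 * by rounding up to the device's allocation unit (1 << ashift).
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t
round_up_asize(uint64_t psize, unsigned ashift)
{
	uint64_t unit = 1ULL << ashift;

	return ((psize + unit - 1) & ~(unit - 1));
}

int
main(void)
{
	/* e.g. a 3000-byte compressed buffer on a 4K-sector (ashift=12) vdev */
	uint64_t psize = 3000;
	uint64_t asize = round_up_asize(psize, 12);

	printf("psize=%llu asize=%llu\n",
	    (unsigned long long)psize, (unsigned long long)asize);
	return (0);
}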
 
-/*
- * Compresses an L2ARC buffer.
- * The data to be compressed must be prefilled in l1hdr.b_tmp_cdata and its
- * size in l2hdr->b_asize. This routine tries to compress the data and
- * depending on the compression result there are three possible outcomes:
- * *) The buffer was incompressible. The original l2hdr contents were left
- *    untouched and are ready for writing to an L2 device.
- * *) The buffer was all-zeros, so there is no need to write it to an L2
- *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
- *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
- * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
- *    data buffer which holds the compressed data to be written, and b_asize
- *    tells us how much data there is. b_compress is set to the appropriate
- *    compression algorithm. Once writing is done, invoke
- *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
- *
- * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
- * buffer was incompressible).
- */
-static boolean_t
-l2arc_compress_buf(arc_buf_hdr_t *hdr)
-{
-       void *cdata;
-       size_t csize, len, rounded;
-       l2arc_buf_hdr_t *l2hdr;
-
-       ASSERT(HDR_HAS_L2HDR(hdr));
-
-       l2hdr = &hdr->b_l2hdr;
-
-       ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT(HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF);
-       ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
-
-       len = l2hdr->b_asize;
-       cdata = zio_data_buf_alloc(len);
-       ASSERT3P(cdata, !=, NULL);
-       csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata,
-           cdata, l2hdr->b_asize);
-
-       rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
-       if (rounded > csize) {
-               bzero((char *)cdata + csize, rounded - csize);
-               csize = rounded;
-       }
-
-       if (csize == 0) {
-               /* zero block, indicate that there's nothing to write */
-               zio_data_buf_free(cdata, len);
-               HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_EMPTY);
-               l2hdr->b_asize = 0;
-               hdr->b_l1hdr.b_tmp_cdata = NULL;
-               ARCSTAT_BUMP(arcstat_l2_compress_zeros);
-               return (B_TRUE);
-       } else if (csize > 0 && csize < len) {
-               /*
-                * Compression succeeded, we'll keep the cdata around for
-                * writing and release it afterwards.
-                */
-               HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
-               l2hdr->b_asize = csize;
-               hdr->b_l1hdr.b_tmp_cdata = cdata;
-               ARCSTAT_BUMP(arcstat_l2_compress_successes);
-               return (B_TRUE);
-       } else {
-               /*
-                * Compression failed, release the compressed buffer.
-                * l2hdr will be left unmodified.
-                */
-               zio_data_buf_free(cdata, len);
-               ARCSTAT_BUMP(arcstat_l2_compress_failures);
-               return (B_FALSE);
-       }
-}
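The removed l2arc_compress_buf() above rounded the compressed length up to SPA_MINBLOCKSIZE (512 bytes) with P2ROUNDUP() and zero-filled the slack before treating the result as the on-device size. A minimal, self-contained sketch of that padding step follows; MINBLOCK and pad_to_block() are local to the example.

/*
 * Illustrative sketch only: round a compressed length up to a 512-byte
 * boundary and zero-fill the tail, as the removed code did with
 * P2ROUNDUP(csize, SPA_MINBLOCKSIZE) and bzero().
 */
#include <stdio.h>
#include <string.h>

#define	MINBLOCK	512	/* mirrors SPA_MINBLOCKSIZE (1 << 9) */

static size_t
pad_to_block(char *buf, size_t csize)
{
	size_t rounded = (csize + MINBLOCK - 1) & ~((size_t)MINBLOCK - 1);

	if (rounded > csize)
		memset(buf + csize, 0, rounded - csize);
	return (rounded);
}

int
main(void)
{
	char buf[1024] = { 0 };

	(void) memset(buf, 'x', 700);	/* pretend 700 compressed bytes */
	printf("padded to %zu bytes\n", pad_to_block(buf, 700));
	return (0);
}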
-
-/*
- * Decompresses a zio read back from an l2arc device. On success, the
- * underlying zio's io_data buffer is overwritten by the uncompressed
- * version. On decompression error (corrupt compressed stream), the
- * zio->io_error value is set to signal an I/O error.
- *
- * Please note that the compressed data stream is not checksummed, so
- * if the underlying device is experiencing data corruption, we may feed
- * corrupt data to the decompressor, so the decompressor needs to be
- * able to handle this situation (LZ4 does).
- */
-static void
-l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
-{
-       uint64_t csize;
-       void *cdata;
-
-       ASSERT(L2ARC_IS_VALID_COMPRESS(c));
-
-       if (zio->io_error != 0) {
-               /*
-                * An I/O error has occurred; just restore the original I/O
-                * size in preparation for a main pool read.
-                */
-               zio->io_orig_size = zio->io_size = hdr->b_size;
-               return;
-       }
-
-       if (c == ZIO_COMPRESS_EMPTY) {
-               /*
-                * An empty buffer results in a null zio, which means we
-                * need to fill its io_data after we're done restoring the
-                * buffer's contents.
-                */
-               ASSERT(hdr->b_l1hdr.b_buf != NULL);
-               bzero(hdr->b_l1hdr.b_buf->b_data, hdr->b_size);
-               zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_buf->b_data;
-       } else {
-               ASSERT(zio->io_data != NULL);
-               /*
-                * We copy the compressed data from the start of the arc buffer
-                * (the zio_read will have pulled in only what we need, the
-                * rest is garbage which we will overwrite at decompression)
-                * and then decompress back to the ARC data buffer. This way we
-                * can minimize copying by simply decompressing back over the
-                * original compressed data (rather than decompressing to an
-                * aux buffer and then copying back the uncompressed buffer,
-                * which is likely to be much larger).
-                */
-               csize = zio->io_size;
-               cdata = zio_data_buf_alloc(csize);
-               bcopy(zio->io_data, cdata, csize);
-               if (zio_decompress_data(c, cdata, zio->io_data, csize,
-                   hdr->b_size) != 0)
-                       zio->io_error = SET_ERROR(EIO);
-               zio_data_buf_free(cdata, csize);
-       }
-
-       /* Restore the expected uncompressed IO size. */
-       zio->io_orig_size = zio->io_size = hdr->b_size;
-}
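The removed l2arc_decompress_zio() above avoided allocating a second full-size buffer by copying the compressed bytes (which sit at the front of an already full-sized buffer) into a small scratch allocation and decompressing back over the original. Below is a minimal sketch of that copy-aside pattern; toy_expand() stands in for zio_decompress_data() and is not a real codec.

/*
 * Illustrative sketch only: copy the compressed bytes aside, then expand
 * them back over the (larger) original buffer in place.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in "decompressor": expand each input byte into two output bytes. */
static void
toy_expand(const char *src, size_t srclen, char *dst)
{
	for (size_t i = 0; i < srclen; i++) {
		dst[2 * i] = src[i];
		dst[2 * i + 1] = src[i];
	}
}

int
main(void)
{
	char buf[8] = { 'a', 'b', 'c', 'd' };	/* 4 "compressed" bytes, room for 8 */
	size_t csize = 4;
	char *scratch = malloc(csize);

	memcpy(scratch, buf, csize);	/* copy compressed bytes aside */
	toy_expand(scratch, csize, buf);	/* expand back over the buffer */
	free(scratch);

	printf("%.8s\n", buf);		/* aabbccdd */
	return (0);
}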
-
-/*
- * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
- * This buffer serves as a temporary holder of compressed data while
- * the buffer entry is being written to an l2arc device. Once that is
- * done, we can dispose of it.
- */
-static void
-l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
-{
-       enum zio_compress comp = HDR_GET_COMPRESS(hdr);
-
-       ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT(comp == ZIO_COMPRESS_OFF || L2ARC_IS_VALID_COMPRESS(comp));
-
-       if (comp == ZIO_COMPRESS_OFF) {
-               /*
-                * In this case, b_tmp_cdata points to the same buffer
-                * as the arc_buf_t's b_data field. We don't want to
-                * free it, since the arc_buf_t will handle that.
-                */
-               hdr->b_l1hdr.b_tmp_cdata = NULL;
-       } else if (comp == ZIO_COMPRESS_EMPTY) {
-               /*
-                * In this case, b_tmp_cdata was compressed to an empty
-                * buffer, thus there's nothing to free and b_tmp_cdata
-                * should have been set to NULL in l2arc_write_buffers().
-                */
-               ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
-       } else {
-               /*
-                * If the data was compressed, then we've allocated a
-                * temporary buffer for it, so now we need to release it.
-                */
-               ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
-               zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata,
-                   hdr->b_size);
-               hdr->b_l1hdr.b_tmp_cdata = NULL;
-       }
-
-}
-
 /*
  * This thread feeds the L2ARC at regular intervals.  This is the beating
  * heart of the L2ARC.
  */
 static void
-l2arc_feed_thread(void)
+l2arc_feed_thread(void *unused)
 {
        callb_cpr_t cpr;
        l2arc_dev_t *dev;
        spa_t *spa;
        uint64_t size, wrote;
        clock_t begin, next = ddi_get_lbolt();
-       boolean_t headroom_boost = B_FALSE;
        fstrans_cookie_t cookie;
 
        CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
@@ -6175,7 +8690,7 @@ l2arc_feed_thread(void)
        cookie = spl_fstrans_mark();
        while (l2arc_thread_exit == 0) {
                CALLB_CPR_SAFE_BEGIN(&cpr);
-               (void) cv_timedwait_interruptible(&l2arc_feed_thr_cv,
+               (void) cv_timedwait_sig(&l2arc_feed_thr_cv,
                    &l2arc_feed_thr_lock, next);
                CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
                next = ddi_get_lbolt() + hz;
@@ -6205,7 +8720,7 @@ l2arc_feed_thread(void)
                        continue;
 
                spa = dev->l2ad_spa;
-               ASSERT(spa != NULL);
+               ASSERT3P(spa, !=, NULL);
 
                /*
                 * If the pool is read-only then force the feed thread to
@@ -6220,7 +8735,7 @@ l2arc_feed_thread(void)
                /*
                 * Avoid contributing to memory pressure.
                 */
-               if (arc_no_grow) {
+               if (arc_reclaim_needed()) {
                        ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
                        spa_config_exit(spa, SCL_L2ARC, dev);
                        continue;
@@ -6238,7 +8753,7 @@ l2arc_feed_thread(void)
                /*
                 * Write ARC buffers.
                 */
-               wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
+               wrote = l2arc_write_buffers(spa, dev, size);
 
                /*
                 * Calculate interval between writes.
@@ -6290,7 +8805,6 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
        adddev->l2ad_start = VDEV_LABEL_START_SIZE;
        adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
        adddev->l2ad_hand = adddev->l2ad_start;
-       adddev->l2ad_evict = adddev->l2ad_start;
        adddev->l2ad_first = B_TRUE;
        adddev->l2ad_writing = B_FALSE;
        list_link_init(&adddev->l2ad_node);
@@ -6304,6 +8818,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
            offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
 
        vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
+       refcount_create(&adddev->l2ad_alloc);
 
        /*
         * Add device to global list
@@ -6333,7 +8848,7 @@ l2arc_remove_vdev(vdev_t *vd)
                        break;
                }
        }
-       ASSERT(remdev != NULL);
+       ASSERT3P(remdev, !=, NULL);
 
        /*
         * Remove device from global list
@@ -6349,6 +8864,7 @@ l2arc_remove_vdev(vdev_t *vd)
        l2arc_evict(remdev, 0, B_TRUE);
        list_destroy(&remdev->l2ad_buflist);
        mutex_destroy(&remdev->l2ad_mtx);
+       refcount_destroy(&remdev->l2ad_alloc);
        kmem_free(remdev, sizeof (l2arc_dev_t));
 }
 
@@ -6400,7 +8916,7 @@ l2arc_start(void)
                return;
 
        (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
-           TS_RUN, minclsyspri);
+           TS_RUN, defclsyspri);
 }
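l2arc_feed_thread() above sleeps on its condition variable with cv_timedwait_sig(), wakes at least once per interval, does a feed pass, and recomputes the next deadline. The following is a rough userspace analogue of that loop shape using POSIX threads; feed_once(), the three-pass limit and the one-second interval are illustrative only.

/*
 * Illustrative userspace analogue only (not kernel code): a worker thread
 * that waits on a condition variable or a deadline, does one unit of work,
 * then recomputes its next deadline.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t	feed_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	feed_cv = PTHREAD_COND_INITIALIZER;
static int		feed_exit;

static void
feed_once(int pass)
{
	printf("feed pass %d\n", pass);
}

static void *
feed_thread(void *unused)
{
	(void) unused;
	pthread_mutex_lock(&feed_lock);
	for (int pass = 0; !feed_exit && pass < 3; pass++) {
		struct timespec deadline;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 1;	/* ~1s, like "next = lbolt + hz" */
		(void) pthread_cond_timedwait(&feed_cv, &feed_lock, &deadline);
		feed_once(pass);
	}
	pthread_mutex_unlock(&feed_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, feed_thread, NULL);
	pthread_join(tid, NULL);
	return (0);
}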
 
 void
@@ -6421,12 +8937,12 @@ l2arc_stop(void)
 EXPORT_SYMBOL(arc_buf_size);
 EXPORT_SYMBOL(arc_write);
 EXPORT_SYMBOL(arc_read);
-EXPORT_SYMBOL(arc_buf_remove_ref);
 EXPORT_SYMBOL(arc_buf_info);
 EXPORT_SYMBOL(arc_getbuf_func);
 EXPORT_SYMBOL(arc_add_prune_callback);
 EXPORT_SYMBOL(arc_remove_prune_callback);
 
+/* BEGIN CSTYLED */
 module_param(zfs_arc_min, ulong, 0644);
 MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
 
@@ -6436,13 +8952,17 @@ MODULE_PARM_DESC(zfs_arc_max, "Max arc size");
 module_param(zfs_arc_meta_limit, ulong, 0644);
 MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");
 
+module_param(zfs_arc_meta_limit_percent, ulong, 0644);
+MODULE_PARM_DESC(zfs_arc_meta_limit_percent,
+       "Percent of arc size for arc meta limit");
+
 module_param(zfs_arc_meta_min, ulong, 0644);
 MODULE_PARM_DESC(zfs_arc_meta_min, "Min arc metadata");
 
 module_param(zfs_arc_meta_prune, int, 0644);
 MODULE_PARM_DESC(zfs_arc_meta_prune, "Meta objects to scan for prune");
 
-module_param(zfs_arc_meta_adjust_restarts, ulong, 0644);
+module_param(zfs_arc_meta_adjust_restarts, int, 0644);
 MODULE_PARM_DESC(zfs_arc_meta_adjust_restarts,
        "Limit number of restarts in arc_adjust_meta");
 
@@ -6461,22 +8981,22 @@ MODULE_PARM_DESC(zfs_arc_p_dampener_disable, "disable arc_p adapt dampener");
 module_param(zfs_arc_shrink_shift, int, 0644);
 MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");
 
-module_param(zfs_disable_dup_eviction, int, 0644);
-MODULE_PARM_DESC(zfs_disable_dup_eviction, "disable duplicate buffer eviction");
+module_param(zfs_arc_pc_percent, uint, 0644);
+MODULE_PARM_DESC(zfs_arc_pc_percent,
+       "Percent of pagecache to reclaim arc to");
+
+module_param(zfs_arc_p_min_shift, int, 0644);
+MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");
 
 module_param(zfs_arc_average_blocksize, int, 0444);
 MODULE_PARM_DESC(zfs_arc_average_blocksize, "Target average block size");
 
-module_param(zfs_arc_memory_throttle_disable, int, 0644);
-MODULE_PARM_DESC(zfs_arc_memory_throttle_disable, "disable memory throttle");
+module_param(zfs_compressed_arc_enabled, int, 0644);
+MODULE_PARM_DESC(zfs_compressed_arc_enabled, "Enable compressed arc buffers");
 
 module_param(zfs_arc_min_prefetch_lifespan, int, 0644);
 MODULE_PARM_DESC(zfs_arc_min_prefetch_lifespan, "Min life of prefetch block");
 
-module_param(zfs_arc_num_sublists_per_state, int, 0644);
-MODULE_PARM_DESC(zfs_arc_num_sublists_per_state,
-       "Number of sublists used in each of the ARC state lists");
-
 module_param(l2arc_write_max, ulong, 0644);
 MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval");
 
@@ -6498,13 +9018,28 @@ MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds");
 module_param(l2arc_noprefetch, int, 0644);
 MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers");
 
-module_param(l2arc_nocompress, int, 0644);
-MODULE_PARM_DESC(l2arc_nocompress, "Skip compressing L2ARC buffers");
-
 module_param(l2arc_feed_again, int, 0644);
 MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup");
 
 module_param(l2arc_norw, int, 0644);
 MODULE_PARM_DESC(l2arc_norw, "No reads during writes");
 
+module_param(zfs_arc_lotsfree_percent, int, 0644);
+MODULE_PARM_DESC(zfs_arc_lotsfree_percent,
+       "System free memory I/O throttle (percent of total memory)");
+
+module_param(zfs_arc_sys_free, ulong, 0644);
+MODULE_PARM_DESC(zfs_arc_sys_free, "System free memory target size in bytes");
+
+module_param(zfs_arc_dnode_limit, ulong, 0644);
+MODULE_PARM_DESC(zfs_arc_dnode_limit, "Minimum bytes of dnodes in arc");
+
+module_param(zfs_arc_dnode_limit_percent, ulong, 0644);
+MODULE_PARM_DESC(zfs_arc_dnode_limit_percent,
+       "Percent of ARC meta buffers for dnodes");
+
+module_param(zfs_arc_dnode_reduce_percent, ulong, 0644);
+MODULE_PARM_DESC(zfs_arc_dnode_reduce_percent,
+       "Percentage of excess dnodes to try to unpin");
+/* END CSTYLED */
 #endif
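Because each tunable above is registered with module_param(..., 0644), its current value is exposed under /sys/module/zfs/parameters/ on Linux and can be read (or, as root, written) at runtime. A small sketch that reads one of them follows; it assumes a Linux system with the zfs module loaded, and the choice of l2arc_write_max is only an example.

/*
 * Illustrative sketch only (not part of arc.c): read a module parameter
 * exposed via sysfs by module_param(..., 0644).
 */
#include <stdio.h>

int
main(void)
{
	const char *path = "/sys/module/zfs/parameters/l2arc_write_max";
	char buf[64];
	FILE *fp = fopen(path, "r");

	if (fp == NULL) {
		perror("fopen");	/* zfs module not loaded? */
		return (1);
	}
	if (fgets(buf, sizeof (buf), fp) != NULL)
		printf("l2arc_write_max = %s", buf);
	fclose(fp);
	return (0);
}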