cstyle: Resolve C style issues
module/zfs/dbuf.c
index 205abaada491ecdc1b9ef0e77d057573a19967ff..c8a52617178e573f12f4426b48f80dee302b45a1 100644 (file)
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
 #include <sys/arc.h>
 #include <sys/dmu.h>
+#include <sys/dmu_send.h>
 #include <sys/dmu_impl.h>
 #include <sys/dbuf.h>
 #include <sys/dmu_objset.h>
@@ -62,8 +64,14 @@ static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
     void *tag, dmu_buf_impl_t **dbp, int depth);
 static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
 
+/*
+ * Number of times that zfs_free_range() took the slow path while doing
+ * a zfs receive.  A nonzero value indicates a potential performance problem.
+ */
+uint64_t zfs_free_range_recv_miss;
+
 static void dbuf_destroy(dmu_buf_impl_t *db);
-static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
+static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
 
 /*
@@ -297,8 +305,10 @@ dbuf_init(void)
 retry:
        h->hash_table_mask = hsize - 1;
 #if defined(_KERNEL) && defined(HAVE_SPL)
-       /* Large allocations which do not require contiguous pages
-        * should be using vmem_alloc() in the linux kernel */
+       /*
+        * Large allocations which do not require contiguous pages
+        * should be using vmem_alloc() in the Linux kernel.
+        */
        h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
 #else
        h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
@@ -316,6 +326,8 @@ retry:
 
        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
+
+       dbuf_stats_init(h);
 }
 
 void
@@ -324,11 +336,15 @@ dbuf_fini(void)
        dbuf_hash_table_t *h = &dbuf_hash_table;
        int i;
 
+       dbuf_stats_destroy();
+
        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_destroy(&h->hash_mutexes[i]);
 #if defined(_KERNEL) && defined(HAVE_SPL)
-       /* Large allocations which do not require contiguous pages
-        * should be using vmem_free() in the linux kernel */
+       /*
+        * Large allocations which do not require contiguous pages
+        * should be using vmem_free() in the Linux kernel.
+        */
        vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
 #else
        kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
@@ -373,7 +389,7 @@ dbuf_verify(dmu_buf_impl_t *db)
        } else if (db->db_blkid == DMU_SPILL_BLKID) {
                ASSERT(dn != NULL);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
-               ASSERT3U(db->db.db_offset, ==, 0);
+               ASSERT0(db->db.db_offset);
        } else {
                ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
        }
@@ -545,7 +561,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
        } else {
                ASSERT(db->db_blkid != DMU_BONUS_BLKID);
                ASSERT3P(db->db_buf, ==, NULL);
-               VERIFY(arc_buf_remove_ref(buf, db) == 1);
+               VERIFY(arc_buf_remove_ref(buf, db));
                db->db_state = DB_UNCACHED;
        }
        cv_broadcast(&db->db_changed);
@@ -559,7 +575,6 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
        spa_t *spa;
        zbookmark_t zb;
        uint32_t aflags = ARC_NOWAIT;
-       arc_buf_t *pbuf;
 
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
@@ -615,20 +630,16 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
 
        if (DBUF_IS_L2CACHEABLE(db))
                aflags |= ARC_L2CACHE;
+       if (DBUF_IS_L2COMPRESSIBLE(db))
+               aflags |= ARC_L2COMPRESS;
 
        SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
            db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
            db->db.db_object, db->db_level, db->db_blkid);
 
        dbuf_add_ref(db, NULL);
-       /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
-
-       if (db->db_parent)
-               pbuf = db->db_parent->db_buf;
-       else
-               pbuf = db->db_objset->os_phys_buf;
 
-       (void) dsl_read(zio, spa, db->db_blkptr, pbuf,
+       (void) arc_read(zio, spa, db->db_blkptr,
            dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
            (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
            &aflags, &zb);
@@ -651,7 +662,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
        ASSERT(!refcount_is_zero(&db->db_holds));
 
        if (db->db_state == DB_NOFILL)
-               return (EIO);
+               return (SET_ERROR(EIO));
 
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
@@ -691,6 +702,14 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
                if (!havepzio)
                        err = zio_wait(zio);
        } else {
+               /*
+                * Another reader came in while the dbuf was in flight
+                * between UNCACHED and CACHED.  Either a writer will finish
+                * writing the buffer (sending the dbuf to CACHED) or the
+                * first reader's request will reach the read_done callback
+                * and send the dbuf to CACHED.  Otherwise, a failure
+                * occurred and the dbuf went to UNCACHED.
+                */
                mutex_exit(&db->db_mtx);
                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
@@ -699,6 +718,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);
 
+               /* Skip the wait per the caller's request. */
                mutex_enter(&db->db_mtx);
                if ((flags & DB_RF_NEVERWAIT) == 0) {
                        while (db->db_state == DB_READ ||
@@ -708,7 +728,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
                                cv_wait(&db->db_changed, &db->db_mtx);
                        }
                        if (db->db_state == DB_UNCACHED)
-                               err = EIO;
+                               err = SET_ERROR(EIO);
                }
                mutex_exit(&db->db_mtx);
        }
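
For orientation, the state transitions referred to in the new comment (sketched from this file, not part of the change) are roughly:

    DB_UNCACHED -> DB_READ      dbuf_read_impl() issues the arc_read()
    DB_READ     -> DB_CACHED    dbuf_read_done(), read succeeded
    DB_READ     -> DB_UNCACHED  dbuf_read_done(), i/o error
    DB_FILL     -> DB_CACHED    a writer finished filling the buffer

dbuf_read() waits on db_changed until the state leaves DB_READ/DB_FILL, unless the caller passed DB_RF_NEVERWAIT.
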
@@ -814,13 +834,15 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
        ASSERT(db->db_data_pending != dr);
 
        /* free this block */
-       if (!BP_IS_HOLE(bp)) {
+       if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) {
                spa_t *spa;
 
                DB_GET_SPA(&spa, db);
                zio_free(spa, txg, bp);
        }
        dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
+       dr->dt.dl.dr_nopwrite = B_FALSE;
+
        /*
         * Release the already-written buffer, so we leave it in
         * a consistent dirty state.  Note that all callers are
@@ -835,9 +857,12 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
 /*
  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
  * data blocks in the free range, so that any future readers will find
- * empty blocks.  Also, if we happen accross any level-1 dbufs in the
+ * empty blocks.  Also, if we happen across any level-1 dbufs in the
  * range that have not already been marked dirty, mark them dirty so
  * they stay in memory.
+ *
+ * This is a no-op if the dataset is in the middle of an incremental
+ * receive; see comment below for details.
  */
 void
 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
@@ -853,8 +878,24 @@ dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
                last_l1 = end >> epbs;
        }
        dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
+
        mutex_enter(&dn->dn_dbufs_mtx);
-       for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
+       if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
+               /* There can't be any dbufs in this range; no need to search. */
+               mutex_exit(&dn->dn_dbufs_mtx);
+               return;
+       } else if (dmu_objset_is_receiving(dn->dn_objset)) {
+               /*
+                * If we are receiving, we expect there to be no dbufs in
+                * the range to be freed, because receive modifies each
+                * block at most once, and in offset order.  If this is
+                * not the case, it can lead to performance problems,
+                * so note that we unexpectedly took the slow path.
+                */
+               atomic_inc_64(&zfs_free_range_recv_miss);
+       }
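
To make the skip/slow-path decision above concrete, here is a small self-contained sketch (userspace toy, not ZFS code; the names are stand-ins and the test is expressed in block-id terms rather than the exact expression used in the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t recv_miss;     /* stands in for zfs_free_range_recv_miss */

    /*
     * Block ids at or beyond unlisted_l0_blkid have never had a level-0
     * dbuf created, so a free range starting there has nothing cached and
     * the list walk can be skipped.  Otherwise, if a receive is in
     * progress, count the unexpected slow path.
     */
    static bool
    free_range_fast_path(uint64_t first_blkid, uint64_t unlisted_l0_blkid,
        bool receiving)
    {
            if (first_blkid >= unlisted_l0_blkid)
                    return (true);
            if (receiving)
                    recv_miss++;
            return (false);
    }

    int
    main(void)
    {
            printf("%d\n", free_range_fast_path(200, 100, true)); /* 1: skip walk */
            printf("%d\n", free_range_fast_path(50, 100, true));  /* 0: slow path */
            printf("misses: %llu\n", (unsigned long long)recv_miss);
            return (0);
    }
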
+
+       for (db = list_head(&dn->dn_dbufs); db != NULL; db = db_next) {
                db_next = list_next(&dn->dn_dbufs, db);
                ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 
@@ -879,10 +920,12 @@ dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
                        continue;
 
                /* found a level 0 buffer in the range */
-               if (dbuf_undirty(db, tx))
+               mutex_enter(&db->db_mtx);
+               if (dbuf_undirty(db, tx)) {
+                       /* mutex has been dropped and dbuf destroyed */
                        continue;
+               }
 
-               mutex_enter(&db->db_mtx);
                if (db->db_state == DB_UNCACHED ||
                    db->db_state == DB_NOFILL ||
                    db->db_state == DB_EVICTING) {
@@ -1009,7 +1052,7 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
 
        mutex_enter(&db->db_mtx);
        dbuf_set_data(db, buf);
-       VERIFY(arc_buf_remove_ref(obuf, db) == 1);
+       VERIFY(arc_buf_remove_ref(obuf, db));
        db->db.db_size = size;
 
        if (db->db_level == 0) {
@@ -1026,7 +1069,6 @@ void
 dbuf_release_bp(dmu_buf_impl_t *db)
 {
        objset_t *os;
-       zbookmark_t zb;
 
        DB_GET_OBJSET(&os, db);
        ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
@@ -1034,13 +1076,7 @@ dbuf_release_bp(dmu_buf_impl_t *db)
            list_link_active(&os->os_dsl_dataset->ds_synced_link));
        ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
 
-       zb.zb_objset = os->os_dsl_dataset ?
-           os->os_dsl_dataset->ds_object : 0;
-       zb.zb_object = db->db.db_object;
-       zb.zb_level = db->db_level;
-       zb.zb_blkid = db->db_blkid;
-       (void) arc_release_bp(db->db_buf, db,
-           db->db_blkptr, os->os_spa, &zb);
+       (void) arc_release(db->db_buf, db);
 }
 
 dbuf_dirty_record_t *
@@ -1206,6 +1242,8 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
        }
+       if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
+               dr->dr_accounted = db->db.db_size;
        dr->dr_dbuf = db;
        dr->dr_txg = tx->tx_txg;
        dr->dr_next = *drp;
@@ -1289,7 +1327,10 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
                        dbuf_rele(parent, FTAG);
 
                mutex_enter(&db->db_mtx);
-               /*  possible race with dbuf_undirty() */
+               /*
+                * Since we've dropped the mutex, it's possible that
+                * dbuf_undirty() might have changed this out from under us.
+                */
                if (db->db_last_dirty == dr ||
                    dn->dn_object == DMU_META_DNODE_OBJECT) {
                        mutex_enter(&di->dt.di.dr_mtx);
@@ -1317,7 +1358,11 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        return (dr);
 }
 
-static int
+/*
+ * Undirty a buffer in the transaction group referenced by the given
+ * transaction.  Return whether this evicted the dbuf.
+ */
+static boolean_t
 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 {
        dnode_t *dn;
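
The locking contract documented above is asymmetric, which the dbuf_free_range() caller earlier in this diff has to respect: it enters db_mtx itself, and when dbuf_undirty() returns B_TRUE the dbuf has been evicted and db_mtx already dropped, so the dbuf must not be touched again; on B_FALSE the mutex is still held.  A minimal stand-alone illustration of that pattern (pthread toy, not ZFS code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct toy_dbuf {
            pthread_mutex_t db_mtx;
            bool            db_dirty;
    } toy_dbuf_t;

    /*
     * Called with db_mtx held.  Returns true when the buffer was "evicted",
     * in which case db_mtx has already been dropped; returns false with
     * db_mtx still held.
     */
    static bool
    toy_undirty(toy_dbuf_t *db)
    {
            if (!db->db_dirty)
                    return (false);                 /* db_mtx still held */
            db->db_dirty = false;
            pthread_mutex_unlock(&db->db_mtx);      /* dropped before return */
            return (true);
    }

    int
    main(void)
    {
            toy_dbuf_t db = { PTHREAD_MUTEX_INITIALIZER, true };

            pthread_mutex_lock(&db.db_mtx);
            if (toy_undirty(&db)) {
                    printf("evicted; mutex already dropped\n");
            } else {
                    printf("still resident\n");
                    pthread_mutex_unlock(&db.db_mtx);
            }
            return (0);
    }
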
@@ -1326,18 +1371,17 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 
        ASSERT(txg != 0);
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
+       ASSERT0(db->db_level);
+       ASSERT(MUTEX_HELD(&db->db_mtx));
 
-       mutex_enter(&db->db_mtx);
        /*
         * If this buffer is not dirty, we're done.
         */
        for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
                if (dr->dr_txg <= txg)
                        break;
-       if (dr == NULL || dr->dr_txg < txg) {
-               mutex_exit(&db->db_mtx);
-               return (0);
-       }
+       if (dr == NULL || dr->dr_txg < txg)
+               return (B_FALSE);
        ASSERT(dr->dr_txg == txg);
        ASSERT(dr->dr_dbuf == db);
 
@@ -1345,30 +1389,22 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        dn = DB_DNODE(db);
 
        /*
-        * If this buffer is currently held, we cannot undirty
-        * it, since one of the current holders may be in the
-        * middle of an update.  Note that users of dbuf_undirty()
-        * should not place a hold on the dbuf before the call.
-        * Also note: we can get here with a spill block, so
-        * test for that similar to how dbuf_dirty does.
+        * Note:  This code will probably work even if there are concurrent
+        * holders, but it is untested in that scenario, as the ZPL and
+        * ztest have additional locking (the range locks) that prevents
+        * that type of concurrent access.
         */
-       if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
-               mutex_exit(&db->db_mtx);
-               /* Make sure we don't toss this buffer at sync phase */
-               if (db->db_blkid != DMU_SPILL_BLKID) {
-                       mutex_enter(&dn->dn_mtx);
-                       dnode_clear_range(dn, db->db_blkid, 1, tx);
-                       mutex_exit(&dn->dn_mtx);
-               }
-               DB_DNODE_EXIT(db);
-               return (0);
-       }
+       ASSERT3U(refcount_count(&db->db_holds), ==, db->db_dirtycnt);
 
        dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
 
        ASSERT(db->db.db_size != 0);
 
-       /* XXX would be nice to fix up dn_towrite_space[] */
+       /*
+        * Any space we accounted for in dp_dirty_* will be cleaned up by
+        * dsl_pool_sync().  This is relatively rare so the discrepancy
+        * is not a big deal.
+        */
 
        *drp = dr->dr_next;
 
@@ -1391,21 +1427,13 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        }
        DB_DNODE_EXIT(db);
 
-       if (db->db_level == 0) {
-               if (db->db_state != DB_NOFILL) {
-                       dbuf_unoverride(dr);
+       if (db->db_state != DB_NOFILL) {
+               dbuf_unoverride(dr);
 
-                       ASSERT(db->db_buf != NULL);
-                       ASSERT(dr->dt.dl.dr_data != NULL);
-                       if (dr->dt.dl.dr_data != db->db_buf)
-                               VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
-                                   db) == 1);
-               }
-       } else {
                ASSERT(db->db_buf != NULL);
-               ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
-               mutex_destroy(&dr->dt.di.dr_mtx);
-               list_destroy(&dr->dt.di.dr_children);
+               ASSERT(dr->dt.dl.dr_data != NULL);
+               if (dr->dt.dl.dr_data != db->db_buf)
+                       VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
        }
        kmem_free(dr, sizeof (dbuf_dirty_record_t));
 
@@ -1417,13 +1445,12 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 
                ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
                dbuf_set_data(db, NULL);
-               VERIFY(arc_buf_remove_ref(buf, db) == 1);
+               VERIFY(arc_buf_remove_ref(buf, db));
                dbuf_evict(db);
-               return (1);
+               return (B_TRUE);
        }
 
-       mutex_exit(&db->db_mtx);
-       return (0);
+       return (B_FALSE);
 }
 
 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
@@ -1522,7 +1549,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
                mutex_exit(&db->db_mtx);
                (void) dbuf_dirty(db, tx);
                bcopy(buf->b_data, db->db.db_data, db->db.db_size);
-               VERIFY(arc_buf_remove_ref(buf, db) == 1);
+               VERIFY(arc_buf_remove_ref(buf, db));
                xuio_stat_wbuf_copied();
                return;
        }
@@ -1540,10 +1567,10 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
                                arc_release(db->db_buf, db);
                        }
                        dr->dt.dl.dr_data = buf;
-                       VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
+                       VERIFY(arc_buf_remove_ref(db->db_buf, db));
                } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
                        arc_release(db->db_buf, db);
-                       VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
+                       VERIFY(arc_buf_remove_ref(db->db_buf, db));
                }
                db->db_buf = NULL;
        }
@@ -1557,7 +1584,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 
 /*
  * "Clear" the contents of this dbuf.  This will mark the dbuf
- * EVICTING and clear *most* of its references.  Unfortunetely,
+ * EVICTING and clear *most* of its references.  Unfortunately,
  * when we are not holding the dn_dbufs_mtx, we can't clear the
  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
  * in this case.  For callers from the DMU we will usually see:
@@ -1670,15 +1697,14 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
        if (level >= nlevels ||
            (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
                /* the buffer has no parent yet */
-               return (ENOENT);
+               return (SET_ERROR(ENOENT));
        } else if (level < nlevels-1) {
                /* this block is referenced from an indirect block */
                int err;
                if (dh == NULL) {
                        err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
                                        fail_sparse, NULL, parentp);
-               }
-               else {
+               } else {
                        __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
                                        blkid >> epbs, fail_sparse, NULL,
                                        parentp, dh->dh_depth + 1);
@@ -1754,7 +1780,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
                db->db.db_offset = 0;
        } else {
                int blocksize =
-                   db->db_level ? 1<<dn->dn_indblkshift :  dn->dn_datablksz;
+                   db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
                db->db.db_size = blocksize;
                db->db.db_offset = db->db_blkid * blocksize;
        }
@@ -1775,6 +1801,9 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
                return (odb);
        }
        list_insert_head(&dn->dn_dbufs, db);
+       if (db->db_level == 0 && db->db_blkid >=
+           dn->dn_unlisted_l0_blkid)
+               dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
        db->db_state = DB_UNCACHED;
        mutex_exit(&dn->dn_dbufs_mtx);
        arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
@@ -1860,7 +1889,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
 }
 
 void
-dbuf_prefetch(dnode_t *dn, uint64_t blkid)
+dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
 {
        dmu_buf_impl_t *db = NULL;
        blkptr_t *bp = NULL;
@@ -1884,9 +1913,6 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
 
        if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
                if (bp && !BP_IS_HOLE(bp)) {
-                       int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
-                           ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
-                       arc_buf_t *pbuf;
                        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
                        uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
                        zbookmark_t zb;
@@ -1894,13 +1920,8 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
                        SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
                            dn->dn_object, 0, blkid);
 
-                       if (db)
-                               pbuf = db->db_buf;
-                       else
-                               pbuf = dn->dn_objset->os_phys_buf;
-
-                       (void) dsl_read(NULL, dn->dn_objset->os_spa,
-                           bp, pbuf, NULL, NULL, priority,
+                       (void) arc_read(NULL, dn->dn_objset->os_spa,
+                           bp, NULL, NULL, prio,
                            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
                            &aflags, &zb);
                }
@@ -1909,7 +1930,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
        }
 }
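
Since the read priority is now supplied by the caller instead of being special-cased for DDT ZAP objects inside this function, a call site chooses it directly; for example (hypothetical call site, not part of this diff):

    dbuf_prefetch(dn, blkid, ZIO_PRIORITY_ASYNC_READ);
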
 
-#define DBUF_HOLD_IMPL_MAX_DEPTH       20
+#define        DBUF_HOLD_IMPL_MAX_DEPTH        20
 
 /*
  * Returns with db_holds incremented, and db_mtx not held.
@@ -1938,8 +1959,9 @@ top:
                                        dh->dh_fail_sparse, &dh->dh_parent,
                                        &dh->dh_bp, dh);
                if (dh->dh_fail_sparse) {
-                       if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
-                               dh->dh_err = ENOENT;
+                       if (dh->dh_err == 0 &&
+                           dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
+                               dh->dh_err = SET_ERROR(ENOENT);
                        if (dh->dh_err) {
                                if (dh->dh_parent)
                                        dbuf_rele(dh->dh_parent, NULL);
@@ -2019,13 +2041,13 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
        struct dbuf_hold_impl_data *dh;
        int error;
 
-       dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
+       dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
            DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
        __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);
 
        error = __dbuf_hold_impl(dh);
 
-       kmem_free(dh, sizeof(struct dbuf_hold_impl_data) *
+       kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
            DBUF_HOLD_IMPL_MAX_DEPTH);
 
        return (error);
@@ -2077,7 +2099,7 @@ dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
        dnode_t *dn;
 
        if (db->db_blkid != DMU_SPILL_BLKID)
-               return (ENOTSUP);
+               return (SET_ERROR(ENOTSUP));
        if (blksz == 0)
                blksz = SPA_MINBLOCKSIZE;
        if (blksz > SPA_MAXBLOCKSIZE)
@@ -2185,10 +2207,10 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
                         * This dbuf has anonymous data associated with it.
                         */
                        dbuf_set_data(db, NULL);
-                       VERIFY(arc_buf_remove_ref(buf, db) == 1);
+                       VERIFY(arc_buf_remove_ref(buf, db));
                        dbuf_evict(db);
                } else {
-                       VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
+                       VERIFY(!arc_buf_remove_ref(db->db_buf, db));
 
                        /*
                         * A dbuf will be eligible for eviction if either the
@@ -2289,6 +2311,13 @@ dmu_buf_freeable(dmu_buf_t *dbuf)
        return (res);
 }
 
+blkptr_t *
+dmu_buf_get_blkptr(dmu_buf_t *db)
+{
+       dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
+       return (dbi->db_blkptr);
+}
+
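
The new dmu_buf_get_blkptr() accessor lets code that holds only a dmu_buf_t inspect the buffer's on-disk block pointer without performing the dmu_buf_impl_t cast itself.  A hypothetical consumer (not part of this change) might use it to check whether the buffer has ever been written out:

    blkptr_t *bp = dmu_buf_get_blkptr(db);

    if (bp == NULL || BP_IS_HOLE(bp)) {
            /* no block is allocated on disk for this buffer */
    }
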
 static void
 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
 {
@@ -2334,7 +2363,8 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
        }
 }
 
-/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
+/*
+ * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
 * is critical that we not allow the compiler to inline this function into
 * dbuf_sync_list(), thereby drastically bloating the stack usage.
  */
@@ -2354,6 +2384,7 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
        ASSERT(db->db_level > 0);
        DBUF_VERIFY(db);
 
+       /* Read the block if it hasn't been read yet. */
        if (db->db_buf == NULL) {
                mutex_exit(&db->db_mtx);
                (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
@@ -2364,10 +2395,12 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
+       /* Indirect block size must match what the dnode thinks it is. */
        ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
        dbuf_check_blkptr(dn, db);
        DB_DNODE_EXIT(db);
 
+       /* Provide the pending dirty record to child dbufs */
        db->db_data_pending = dr;
 
        mutex_exit(&db->db_mtx);
@@ -2381,7 +2414,8 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
        zio_nowait(zio);
 }
 
-/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
+/*
+ * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
 * critical that we not allow the compiler to inline this function into
 * dbuf_sync_list(), thereby drastically bloating the stack usage.
  */
@@ -2433,7 +2467,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
                dbuf_dirty_record_t **drp;
 
                ASSERT(*datap != NULL);
-               ASSERT3U(db->db_level, ==, 0);
+               ASSERT0(db->db_level);
                ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
                bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
                DB_DNODE_EXIT(db);
@@ -2626,6 +2660,38 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
        mutex_exit(&db->db_mtx);
 }
 
+/*
+ * The SPA will call this callback several times for each zio - once
+ * for every physical child i/o (zio->io_phys_children times).  This
+ * allows the DMU to monitor the progress of each logical i/o.  For example,
+ * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
+ * block.  There may be a long delay before all copies/fragments are completed,
+ * so this callback allows us to retire dirty space gradually, as the physical
+ * i/os complete.
+ */
+/* ARGSUSED */
+static void
+dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
+{
+       dmu_buf_impl_t *db = arg;
+       objset_t *os = db->db_objset;
+       dsl_pool_t *dp = dmu_objset_pool(os);
+       dbuf_dirty_record_t *dr;
+       int delta = 0;
+
+       dr = db->db_data_pending;
+       ASSERT3U(dr->dr_txg, ==, zio->io_txg);
+
+       /*
+        * The callback will be called io_phys_children times.  Retire one
+        * portion of our dirty space each time we are called.  Any rounding
+        * error will be cleaned up by dsl_pool_sync()'s call to
+        * dsl_pool_undirty_space().
+        */
+       delta = dr->dr_accounted / zio->io_phys_children;
+       dsl_pool_undirty_space(dp, delta, zio->io_txg);
+}
+
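
As a worked example of the retirement arithmetic above (figures hypothetical): if dr_accounted is 131072 bytes and the zio has 3 physical children, each callback undirties 131072 / 3 = 43690 bytes, and the 2 bytes lost to integer rounding (131072 - 3 * 43690) are cleaned up later by dsl_pool_sync()'s call to dsl_pool_undirty_space().
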
 /* ARGSUSED */
 static void
 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
@@ -2636,10 +2702,14 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
        uint64_t txg = zio->io_txg;
        dbuf_dirty_record_t **drp, *dr;
 
-       ASSERT3U(zio->io_error, ==, 0);
+       ASSERT0(zio->io_error);
        ASSERT(db->db_blkptr == bp);
 
-       if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
+       /*
+        * For nopwrites and rewrites we ensure that the bp matches our
+        * original and bypass all the accounting.
+        */
+       if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
                ASSERT(BP_EQUAL(bp, bp_orig));
        } else {
                objset_t *os;
@@ -2686,7 +2756,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
                if (db->db_state != DB_NOFILL) {
                        if (dr->dt.dl.dr_data != db->db_buf)
                                VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
-                                   db) == 1);
+                                   db));
                        else if (!arc_released(db->db_buf))
                                arc_set_callback(db->db_buf, dbuf_do_evict, db);
                }
@@ -2716,6 +2786,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
        ASSERT(db->db_dirtycnt > 0);
        db->db_dirtycnt -= 1;
        db->db_data_pending = NULL;
+
        dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
 }
 
@@ -2758,6 +2829,7 @@ dbuf_write_override_done(zio_t *zio)
        dbuf_write_done(zio, NULL, db);
 }
 
+/* Issue I/O to commit a dirty buffer to disk. */
 static void
 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 {
@@ -2792,11 +2864,19 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
        }
 
        if (parent != dn->dn_dbuf) {
+               /* Our parent is an indirect block. */
+               /* We have a dirty parent that has been scheduled for write. */
                ASSERT(parent && parent->db_data_pending);
+               /* Our parent's buffer is one level closer to the dnode. */
                ASSERT(db->db_level == parent->db_level-1);
+               /*
+                * We're about to modify our parent's db_data by modifying
+                * our block pointer, so the parent must be released.
+                */
                ASSERT(arc_released(parent->db_buf));
                zio = parent->db_data_pending->dr_zio;
        } else {
+               /* Our parent is the dnode itself. */
                ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
                    db->db_blkid != DMU_SPILL_BLKID) ||
                    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
@@ -2825,25 +2905,26 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
                ASSERT(db->db_state != DB_NOFILL);
                dr->dr_zio = zio_write(zio, os->os_spa, txg,
                    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
-                   dbuf_write_override_ready, dbuf_write_override_done, dr,
-                   ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
+                   dbuf_write_override_ready, NULL, dbuf_write_override_done,
+                   dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
                mutex_enter(&db->db_mtx);
                dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
                zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
-                   dr->dt.dl.dr_copies);
+                   dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
                mutex_exit(&db->db_mtx);
        } else if (db->db_state == DB_NOFILL) {
                ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
                dr->dr_zio = zio_write(zio, os->os_spa, txg,
                    db->db_blkptr, NULL, db->db.db_size, &zp,
-                   dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
+                   dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
                    ZIO_PRIORITY_ASYNC_WRITE,
                    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
        } else {
                ASSERT(arc_released(data));
                dr->dr_zio = arc_write(zio, os->os_spa, txg,
-                   db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
-                   dbuf_write_ready, dbuf_write_done, db,
+                   db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
+                   DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
+                   dbuf_write_physdone, dbuf_write_done, db,
                    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
        }
 }