* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
+ * Copyright (c) 2019 Datto Inc.
*/
#include <sys/dmu.h>
int zfs_nopwrite_enabled = 1;
/*
- * Tunable to control percentage of dirtied blocks from frees in one TXG.
- * After this threshold is crossed, additional dirty blocks from frees
- * wait until the next TXG.
+ * Tunable to control percentage of dirtied L1 blocks from frees allowed into
+ * one TXG. After this threshold is crossed, additional dirty blocks from frees
+ * will wait until the next TXG.
* A value of zero will disable this throttle.
*/
-unsigned long zfs_per_txg_dirty_frees_percent = 30;
+unsigned long zfs_per_txg_dirty_frees_percent = 5;
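/*
 * A minimal sketch (values illustrative, not from this change) of how the
 * percentage becomes a byte threshold in dmu_free_long_range_impl() below:
 * the limit is taken against zfs_dirty_data_max, and each chunk of frees
 * charges its dirtied L1 space, l1blks << dn_indblkshift, to the open TXG.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t zfs_dirty_data_max = 4ULL << 30;	/* e.g. 4 GiB */
	uint64_t percent = 5;				/* new default */
	uint64_t threshold = percent * zfs_dirty_data_max / 100;

	uint64_t l1blks = 1000;		/* L1 blocks dirtied by one chunk */
	int dn_indblkshift = 17;	/* 128 KiB indirect blocks */
	uint64_t charged = l1blks << dn_indblkshift;

	printf("threshold %llu MiB, this chunk charges %llu MiB\n",
	    (unsigned long long)(threshold >> 20),
	    (unsigned long long)(charged >> 20));
	return (0);
}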
/*
* Enable/disable forcing txg sync when dirty in dmu_offset_next.
*/
int zfs_dmu_offset_next_sync = 0;
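/*
 * Illustrative userland probe (not part of this change): dmu_offset_next()
 * backs lseek(2) SEEK_HOLE/SEEK_DATA. With zfs_dmu_offset_next_sync=0, a
 * file with dirty, unsynced data may simply report no hole before EOF
 * rather than forcing a txg sync to compute exact hole offsets.
 */
#define	_GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	if (argc != 2)
		return (1);
	int fd = open(argv[1], O_RDONLY);
	if (fd == -1)
		return (1);
	off_t hole = lseek(fd, 0, SEEK_HOLE);	/* first hole, or EOF */
	printf("first hole at %lld\n", (long long)hole);
	(void) close(fd);
	return (0);
}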
+/*
+ * This can be used for testing, to ensure that certain actions happen
+ * while in the middle of a remap (which might otherwise complete too
+ * quickly). Used by ztest(8).
+ */
+int zfs_object_remap_one_indirect_delay_ms = 0;
+
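/*
 * Assumed wiring (a sketch; the exact text would live near the bottom of
 * this file on Linux builds): tunables like this one are exported as
 * module parameters so ztest and the test suites can adjust them at
 * runtime.
 */
#if defined(_KERNEL)
module_param(zfs_object_remap_one_indirect_delay_ms, int, 0644);
MODULE_PARM_DESC(zfs_object_remap_one_indirect_delay_ms,
	"Delay in milliseconds after remapping an indirect");
#endif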
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
- { DMU_BSWAP_UINT8, TRUE, FALSE, "unallocated" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "object directory" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "object array" },
- { DMU_BSWAP_UINT8, TRUE, FALSE, "packed nvlist" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "packed nvlist size" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj header" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "SPA space map header" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "SPA space map" },
- { DMU_BSWAP_UINT64, TRUE, TRUE, "ZIL intent log" },
- { DMU_BSWAP_DNODE, TRUE, TRUE, "DMU dnode" },
- { DMU_BSWAP_OBJSET, TRUE, FALSE, "DMU objset" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "DSL directory" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL directory child map"},
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dataset snap map" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL props" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "DSL dataset" },
- { DMU_BSWAP_ZNODE, TRUE, FALSE, "ZFS znode" },
- { DMU_BSWAP_OLDACL, TRUE, TRUE, "ZFS V0 ACL" },
- { DMU_BSWAP_UINT8, FALSE, TRUE, "ZFS plain file" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS directory" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS master node" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS delete queue" },
- { DMU_BSWAP_UINT8, FALSE, TRUE, "zvol object" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "zvol prop" },
- { DMU_BSWAP_UINT8, FALSE, TRUE, "other uint8[]" },
- { DMU_BSWAP_UINT64, FALSE, TRUE, "other uint64[]" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "other ZAP" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "persistent error log" },
- { DMU_BSWAP_UINT8, TRUE, FALSE, "SPA history" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "SPA history offsets" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "Pool properties" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL permissions" },
- { DMU_BSWAP_ACL, TRUE, TRUE, "ZFS ACL" },
- { DMU_BSWAP_UINT8, TRUE, TRUE, "ZFS SYSACL" },
- { DMU_BSWAP_UINT8, TRUE, TRUE, "FUID table" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "FUID table size" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dataset next clones"},
- { DMU_BSWAP_ZAP, TRUE, FALSE, "scan work queue" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group/project used" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group/project quota"},
- { DMU_BSWAP_ZAP, TRUE, FALSE, "snapshot refcount tags"},
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DDT ZAP algorithm" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DDT statistics" },
- { DMU_BSWAP_UINT8, TRUE, TRUE, "System attributes" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "SA master node" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "SA attr registration" },
- { DMU_BSWAP_ZAP, TRUE, TRUE, "SA attr layouts" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "scan translations" },
- { DMU_BSWAP_UINT8, FALSE, TRUE, "deduplicated block" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL deadlist map" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "DSL deadlist map hdr" },
- { DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dir clones" },
- { DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj subobj" }
+ {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
+ {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
+ {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
+ {DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
+ {DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
+ {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
+ {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
+ {DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
+ {DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
+ {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
+ {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
+ {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
+ {DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
+ {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
+ {DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
+ {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
+ {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
+ {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
+ {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
+ {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
+ {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
+ {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
+ {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};
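/*
 * The rows above gained a third boolean column. Assuming the matching
 * declaration in sys/dmu.h, the fields are, in order: the byteswap
 * function, whether the type is metadata, whether it is eligible for the
 * metadata dbuf cache, whether its contents are encrypted on disk, and
 * the name reported by tools such as zdb.
 */
typedef struct dmu_object_type_info {
	dmu_object_byteswap_t	ot_byteswap;
	boolean_t		ot_metadata;
	boolean_t		ot_dbuf_metadata_cache;
	boolean_t		ot_encrypt;
	char			*ot_name;
} dmu_object_type_info_t;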
const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
};
/*
- * returns ENOENT, EIO, or 0.
+ * Look up and hold the bonus buffer for the provided dnode. If the dnode
+ * has not yet been allocated a bonus dbuf, a new one will be allocated.
+ * Returns ENOENT, EIO, or 0.
*/
-int
-dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag, uint32_t flags,
- dmu_buf_t **dbp)
+int
+dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
+    uint32_t flags)
{
- dnode_t *dn;
dmu_buf_impl_t *db;
int error;
uint32_t db_flags = DB_RF_MUST_SUCCEED;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
- error = dnode_hold(os, object, FTAG, &dn);
- if (error)
- return (error);
-
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus == NULL) {
rw_exit(&dn->dn_struct_rwlock);
db = dn->dn_bonus;
/* as long as the bonus buf is held, the dnode will be held */
- if (refcount_add(&db->db_holds, tag) == 1) {
+ if (zfs_refcount_add(&db->db_holds, tag) == 1) {
VERIFY(dnode_add_ref(dn, db));
atomic_inc_32(&dn->dn_dbufs_count);
}
*/
rw_exit(&dn->dn_struct_rwlock);
- dnode_rele(dn, FTAG);
-
error = dbuf_read(db, NULL, db_flags);
if (error) {
dnode_evict_bonus(dn);
}
int
-dmu_bonus_hold(objset_t *os, uint64_t obj, void *tag, dmu_buf_t **dbp)
+dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
- return (dmu_bonus_hold_impl(os, obj, tag, DMU_READ_NO_PREFETCH, dbp));
+ dnode_t *dn;
+ int error;
+
+ error = dnode_hold(os, object, FTAG, &dn);
+ if (error)
+ return (error);
+
+ error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
+ dnode_rele(dn, FTAG);
+
+ return (error);
}
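/*
 * Usage sketch (hypothetical caller): code that already holds a dnode can
 * now take the bonus buffer directly, skipping the extra
 * dnode_hold()/dnode_rele() round trip, and may pass DMU_READ_NO_DECRYPT
 * to work with raw (still-encrypted) bonus contents. Release with
 * dmu_buf_rele(*dbp, FTAG) as usual.
 */
static int
example_hold_raw_bonus(dnode_t *dn, dmu_buf_t **dbp)
{
	return (dmu_bonus_hold_by_dnode(dn, FTAG, dbp,
	    DMU_READ_NO_PREFETCH | DMU_READ_NO_DECRYPT));
}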
/*
}
int
-dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
+dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
+ dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
+ uint32_t db_flags = DB_RF_CANFAIL;
+
+ if (flags & DMU_READ_NO_DECRYPT)
+ db_flags |= DB_RF_NO_DECRYPT;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
- err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
+ err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
DB_DNODE_EXIT(db);
return (err);
*
* On input, *start should be the first offset that does not need to be
* freed (e.g. "offset + length"). On return, *start will be the first
- * offset that should be freed.
+ * offset that should be freed, and *l1blks will be set to the number of
+ * level-1 indirect blocks found within the chunk.
*/
static int
-get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
+get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
+ uint64_t blks;
uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
/* bytes of data covered by a level-1 indirect block */
uint64_t iblkrange =
if (*start - minimum <= iblkrange * maxblks) {
*start = minimum;
+ /*
+ * Assume full L1 blocks and 128k recordsize to approximate the
+ * expected number of L1 blocks in this chunk.
+ */
+ *l1blks = minimum / (1024 * 128 * 1024);
return (0);
}
ASSERT(ISP2(iblkrange));
- for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
+ for (blks = 0; *start > minimum && blks < maxblks; blks++) {
int err;
/*
*start = minimum;
break;
} else if (err != 0) {
+ *l1blks = blks;
return (err);
}
}
if (*start < minimum)
*start = minimum;
+ *l1blks = blks;
return (0);
}
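/*
 * Worked example (illustrative) of the sizes behind the estimate above:
 * with 128 KiB data blocks and 128 KiB indirect blocks, one L1 indirect
 * holds 128 KiB / sizeof (blkptr_t) = 1024 block pointers, so it spans
 * 1024 * 128 KiB = 128 MiB -- the 1024 * 128 * 1024 divisor used when
 * approximating *l1blks.
 */
#include <assert.h>
#include <stdint.h>

#define	EXAMPLE_SPA_BLKPTRSHIFT	7	/* sizeof (blkptr_t) == 128 */

int
main(void)
{
	uint64_t datablksz = 128 * 1024;	/* 128 KiB recordsize */
	int indblkshift = 17;			/* 128 KiB indirects */
	uint64_t iblkrange = datablksz <<
	    (indblkshift - EXAMPLE_SPA_BLKPTRSHIFT);

	assert(iblkrange == 1024ULL * 128 * 1024);	/* 128 MiB */
	return (0);
}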
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
- uint64_t length, boolean_t raw)
+ uint64_t length)
{
uint64_t object_size;
int err;
dirty_frees_threshold =
zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
else
- dirty_frees_threshold = zfs_dirty_data_max / 4;
+ dirty_frees_threshold = zfs_dirty_data_max / 20;
if (length == DMU_OBJECT_END || offset + length > object_size)
length = object_size - offset;
while (length != 0) {
uint64_t chunk_end, chunk_begin, chunk_len;
uint64_t long_free_dirty_all_txgs = 0;
+ uint64_t l1blks;
dmu_tx_t *tx;
if (dmu_objset_zfs_unmounting(dn->dn_objset))
chunk_end = chunk_begin = offset + length;
/* move chunk_begin backwards to the beginning of this chunk */
- err = get_next_chunk(dn, &chunk_begin, offset);
+ err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
if (err)
return (err);
ASSERT3U(chunk_begin, >=, offset);
*/
if (dirty_frees_threshold != 0 &&
long_free_dirty_all_txgs >= dirty_frees_threshold) {
+ DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
txg_wait_open(dp, 0);
continue;
}
return (err);
}
+ /*
+ * In order to prevent unnecessary write throttling, for each
+ * TXG, we track the cumulative size of L1 blocks being dirtied
+ * in dnode_free_range() below. We compare this number to a
+ * tunable threshold, past which we prevent new dirty-freeing L1
+ * blocks from being added into the open TXG. See
+ * dmu_free_long_range_impl() for details. The threshold
+ * prevents write throttle activation due to dirty-freeing L1
+ * blocks taking up a large percentage of zfs_dirty_data_max.
+ */
mutex_enter(&dp->dp_lock);
dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
- chunk_len;
+ l1blks << dn->dn_indblkshift;
mutex_exit(&dp->dp_lock);
DTRACE_PROBE3(free__long__range,
uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
uint64_t, dmu_tx_get_txg(tx));
dnode_free_range(dn, chunk_begin, chunk_len, tx);
- /* if this is a raw free, mark the dirty record as such */
- if (raw) {
- dbuf_dirty_record_t *dr = dn->dn_dbuf->db_last_dirty;
-
- while (dr != NULL && dr->dr_txg > tx->tx_txg)
- dr = dr->dr_next;
- if (dr != NULL && dr->dr_txg == tx->tx_txg) {
- dr->dt.dl.dr_raw = B_TRUE;
- dn->dn_objset->os_next_write_raw
- [tx->tx_txg & TXG_MASK] = B_TRUE;
- }
- }
-
dmu_tx_commit(tx);
length -= chunk_len;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
- err = dmu_free_long_range_impl(os, dn, offset, length, B_FALSE);
+ err = dmu_free_long_range_impl(os, dn, offset, length);
/*
* It is important to zero out the maxblkid when freeing the entire
return (err);
}
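/*
 * Usage sketch (hypothetical caller): truncate an object's data from
 * "size" to end-of-object in open context; the long-free path above
 * splits the work into per-TXG chunks and applies the dirty-frees
 * throttle automatically.
 */
static int
example_truncate_tail(objset_t *os, uint64_t object, uint64_t size)
{
	return (dmu_free_long_range(os, object, size, DMU_OBJECT_END));
}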
-/*
- * This function is equivalent to dmu_free_long_range(), but also
- * marks the new dirty record as a raw write.
- */
int
-dmu_free_long_range_raw(objset_t *os, uint64_t object,
- uint64_t offset, uint64_t length)
-{
- dnode_t *dn;
- int err;
-
- err = dnode_hold(os, object, FTAG, &dn);
- if (err != 0)
- return (err);
- err = dmu_free_long_range_impl(os, dn, offset, length, B_TRUE);
-
- /*
- * It is important to zero out the maxblkid when freeing the entire
- * file, so that (a) subsequent calls to dmu_free_long_range_impl()
- * will take the fast path, and (b) dnode_reallocate() can verify
- * that the entire file has been freed.
- */
- if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
- dn->dn_maxblkid = 0;
-
- dnode_rele(dn, FTAG);
- return (err);
-}
-
-static int
-dmu_free_long_object_impl(objset_t *os, uint64_t object, boolean_t raw)
+dmu_free_long_object(objset_t *os, uint64_t object)
{
dmu_tx_t *tx;
int err;
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err == 0) {
-		if (raw)
-			err = dmu_object_dirty_raw(os, object, tx);
-		if (err == 0)
-			err = dmu_object_free(os, object, tx);
+		err = dmu_object_free(os, object, tx);
return (err);
}
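/*
 * Usage sketch (hypothetical caller): with the _raw variant removed,
 * object deletion is the same call for encrypted and plaintext datasets;
 * the function assigns its own tx and marks it netfree.
 */
static int
example_delete_object(objset_t *os, uint64_t object)
{
	return (dmu_free_long_object(os, object));
}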
-int
-dmu_free_long_object(objset_t *os, uint64_t object)
-{
- return (dmu_free_long_object_impl(os, object, B_FALSE));
-}
-
-int
-dmu_free_long_object_raw(objset_t *os, uint64_t object)
-{
- return (dmu_free_long_object_impl(os, object, B_TRUE));
-}
-
-
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
+static int
+dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
+ uint64_t last_removal_txg, uint64_t offset)
+{
+ uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
+ dnode_t *dn_tx;
+ int err = 0;
+
+ rw_enter(&dn->dn_struct_rwlock, RW_READER);
+ dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
+ ASSERT3P(dbuf, !=, NULL);
+
+ /*
+ * If the block hasn't been written yet, this default will ensure
+ * we don't try to remap it.
+ */
+ uint64_t birth = UINT64_MAX;
+ ASSERT3U(last_removal_txg, !=, UINT64_MAX);
+ if (dbuf->db_blkptr != NULL)
+ birth = dbuf->db_blkptr->blk_birth;
+ rw_exit(&dn->dn_struct_rwlock);
+
+ /*
+ * If this L1 was already written after the last removal, then we've
+ * already tried to remap it. An additional hold is taken after the
+ * dmu_tx_assign() to handle the case where the dnode is freed while
+ * waiting for the next open txg.
+ */
+ if (birth <= last_removal_txg &&
+ dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
+ dbuf_can_remap(dbuf)) {
+ dmu_tx_t *tx = dmu_tx_create(os);
+ dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err == 0) {
+ err = dnode_hold(os, dn->dn_object, FTAG, &dn_tx);
+ if (err == 0) {
+ (void) dbuf_dirty(dbuf, tx);
+ dnode_rele(dn_tx, FTAG);
+ }
+ dmu_tx_commit(tx);
+ } else {
+ dmu_tx_abort(tx);
+ }
+ }
+
+ dbuf_rele(dbuf, FTAG);
+
+ delay(MSEC_TO_TICK(zfs_object_remap_one_indirect_delay_ms));
+
+ return (err);
+}
+
+/*
+ * Remap all blockpointers in the object, if possible, so that they reference
+ * only concrete vdevs.
+ *
+ * To do this, iterate over the L0 blockpointers and remap any that reference
+ * an indirect vdev. Note that we only examine L0 blockpointers; since we
+ * cannot guarantee that we can remap all blockpointers anyway (due to split
+ * blocks), we do not want to make the code unnecessarily complicated to
+ * catch the unlikely case that there is an L1 block on an indirect vdev that
+ * contains no indirect blockpointers.
+ */
+int
+dmu_object_remap_indirects(objset_t *os, uint64_t object,
+ uint64_t last_removal_txg)
+{
+ uint64_t offset, l1span;
+ int err;
+ dnode_t *dn, *dn_tx;
+
+ err = dnode_hold(os, object, FTAG, &dn);
+ if (err != 0) {
+ return (err);
+ }
+
+ if (dn->dn_nlevels <= 1) {
+ if (issig(JUSTLOOKING) && issig(FORREAL)) {
+ err = SET_ERROR(EINTR);
+ }
+
+ /*
+ * If the dnode has no indirect blocks, we cannot dirty them.
+ * We still want to remap the blkptr(s) in the dnode if
+ * appropriate, so mark it as dirty. An additional hold is
+ * taken after the dmu_tx_assign() to handle the case where
+ * the dnode is freed while waiting for the next open txg.
+ */
+ if (err == 0 && dnode_needs_remap(dn)) {
+ dmu_tx_t *tx = dmu_tx_create(os);
+ dmu_tx_hold_bonus(tx, object);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err == 0) {
+ err = dnode_hold(os, object, FTAG, &dn_tx);
+ if (err == 0) {
+ dnode_setdirty(dn_tx, tx);
+ dnode_rele(dn_tx, FTAG);
+ }
+ dmu_tx_commit(tx);
+ } else {
+ dmu_tx_abort(tx);
+ }
+ }
+
+ dnode_rele(dn, FTAG);
+ return (err);
+ }
+
+ offset = 0;
+ l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
+ dn->dn_datablkshift);
+ /*
+ * Find the next L1 indirect that is not a hole.
+ */
+ while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
+ if (issig(JUSTLOOKING) && issig(FORREAL)) {
+ err = SET_ERROR(EINTR);
+ break;
+ }
+ if ((err = dmu_object_remap_one_indirect(os, dn,
+ last_removal_txg, offset)) != 0) {
+ break;
+ }
+ offset += l1span;
+ }
+
+ dnode_rele(dn, FTAG);
+ return (err);
+}
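/*
 * Caller sketch (hypothetical): walk every allocated object in an objset
 * after a device removal and remap its block pointers, stopping on error
 * or signal.
 */
static int
example_remap_objset(objset_t *os, uint64_t last_removal_txg)
{
	uint64_t object = 0;
	int err = 0;

	while (dmu_object_next(os, &object, B_FALSE, 0) == 0) {
		err = dmu_object_remap_indirects(os, object,
		    last_removal_txg);
		if (err != 0)
			break;
	}
	return (err);
}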
+
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
arc_buf_destroy(buf, FTAG);
}
-void
-dmu_convert_to_raw(dmu_buf_t *handle, boolean_t byteorder, const uint8_t *salt,
- const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
-{
- dmu_object_type_t type;
- dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
- uint64_t dsobj = dmu_objset_id(db->db_objset);
-
- ASSERT3P(db->db_buf, !=, NULL);
- ASSERT3U(dsobj, !=, 0);
-
- dmu_buf_will_change_crypt_params(handle, tx);
-
- DB_DNODE_ENTER(db);
- type = DB_DNODE(db)->dn_type;
- DB_DNODE_EXIT(db);
-
- /*
- * This technically violates the assumption the dmu code makes
- * that dnode blocks are only released in syncing context.
- */
- (void) arc_release(db->db_buf, db);
- arc_convert_to_raw(db->db_buf, dsobj, byteorder, type, salt, iv, mac);
-}
-
void
dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
dmu_buf_t *handle, dmu_tx_t *tx)
dmu_buf_t *dst_handle;
dmu_buf_impl_t *dstdb;
dmu_buf_impl_t *srcdb = (dmu_buf_impl_t *)handle;
+ dmu_object_type_t type;
arc_buf_t *abuf;
uint64_t datalen;
boolean_t byteorder;
dstdb = (dmu_buf_impl_t *)dst_handle;
datalen = arc_buf_size(srcdb->db_buf);
+ DB_DNODE_ENTER(dstdb);
+ type = DB_DNODE(dstdb)->dn_type;
+ DB_DNODE_EXIT(dstdb);
+
/* allocate an arc buffer that matches the type of srcdb->db_buf */
if (arc_is_encrypted(srcdb->db_buf)) {
arc_get_raw_params(srcdb->db_buf, &byteorder, salt, iv, mac);
abuf = arc_loan_raw_buf(os->os_spa, dmu_objset_id(os),
- byteorder, salt, iv, mac, DB_DNODE(dstdb)->dn_type,
+ byteorder, salt, iv, mac, type,
datalen, arc_buf_lsize(srcdb->db_buf),
arc_get_compression(srcdb->db_buf));
} else {
ASSERT3U(arc_get_compression(srcdb->db_buf),
==, ZIO_COMPRESS_OFF);
abuf = arc_loan_buf(os->os_spa,
- DMU_OT_IS_METADATA(DB_DNODE(dstdb)->dn_type), datalen);
+ DMU_OT_IS_METADATA(type), datalen);
}
ASSERT3U(datalen, ==, arc_buf_size(abuf));
* If this is not possible, copy the contents of the passed arc buf via
* dmu_write().
*/
-void
+int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
- VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
+ db = dbuf_hold(dn, blkid, FTAG);
+ if (db == NULL)
+ return (SET_ERROR(EIO));
rw_exit(&dn->dn_struct_rwlock);
/*
dmu_return_arcbuf(buf);
XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}
+
+ return (0);
}
-void
+int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
+ int err;
dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
DB_DNODE_ENTER(dbuf);
- dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
+ err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
DB_DNODE_EXIT(dbuf);
+
+ return (err);
}
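/*
 * Usage sketch (hypothetical caller): loan an arc buffer, fill it, and
 * hand it to the DMU. On success the DMU consumes the buffer; on failure
 * the caller still owns it and must return it, which is why these
 * functions now return an error instead of void.
 */
static int
example_write_loaned(dmu_buf_t *bonus, uint64_t off, int size,
    const void *data, dmu_tx_t *tx)
{
	arc_buf_t *abuf = dmu_request_arcbuf(bonus, size);
	int err;

	bcopy(data, abuf->b_data, size);
	err = dmu_assign_arcbuf_by_dbuf(bonus, off, abuf, tx);
	if (err != 0)
		dmu_return_arcbuf(abuf);
	return (err);
}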
typedef struct {
dmu_sync_arg_t *dsa = varg;
dbuf_dirty_record_t *dr = dsa->dsa_dr;
dmu_buf_impl_t *db = dr->dr_dbuf;
+ zgd_t *zgd = dsa->dsa_zgd;
+
+ /*
+ * Record the vdev(s) backing this blkptr so they can be flushed after
+ * the writes for the lwb have completed.
+ */
+ if (zio->io_error == 0) {
+ zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
+ }
mutex_enter(&db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
{
blkptr_t *bp = zio->io_bp;
dmu_sync_arg_t *dsa = zio->io_private;
- ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig);
-
- if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
- ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
- ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
- ASSERT(zio->io_bp->blk_birth == zio->io_txg);
- ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
- zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
+ zgd_t *zgd = dsa->dsa_zgd;
+
+ if (zio->io_error == 0) {
+ /*
+ * Record the vdev(s) backing this blkptr so they can be
+ * flushed after the writes for the lwb have completed.
+ */
+ zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
+
+ if (!BP_IS_HOLE(bp)) {
+ ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig);
+ ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
+ ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
+ ASSERT(zio->io_bp->blk_birth == zio->io_txg);
+ ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
+ zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
+ }
}
dmu_tx_commit(dsa->dsa_tx);
ASSERT(pio != NULL);
ASSERT(txg != 0);
- /* dbuf is within the locked range */
- ASSERT3U(db->db.db_offset, >=, zgd->zgd_rl->r_off);
- ASSERT3U(db->db.db_offset + db->db.db_size, <=,
- zgd->zgd_rl->r_off + zgd->zgd_rl->r_len);
-
SET_BOOKMARK(&zb, ds->ds_object,
db->db.db_object, db->db_level, db->db_blkid);
dnode_rele(dn, FTAG);
}
-/*
- * Dirty an object and set the dirty record's raw flag. This is used
- * when writing raw data to an object that will not effect the
- * encryption parameters, specifically during raw receives.
- */
-int
-dmu_object_dirty_raw(objset_t *os, uint64_t object, dmu_tx_t *tx)
-{
- dnode_t *dn;
- int err;
-
- err = dnode_hold(os, object, FTAG, &dn);
- if (err)
- return (err);
- dmu_buf_will_change_crypt_params((dmu_buf_t *)dn->dn_dbuf, tx);
- dnode_rele(dn, FTAG);
- return (err);
-}
-
/*
* When the "redundant_metadata" property is set to "most", only indirect
* blocks of this level and higher will have an additional ditto block.
bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
+ zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
+ os->os_zpl_special_smallblock : 0;
ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
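/*
 * Illustrative only (the helper below is hypothetical; the real class
 * selection happens at allocation time): zp_zpl_smallblk carries the
 * dataset's special_small_blocks cutoff into the write policy, steering
 * small file blocks to a special allocation class vdev when one exists.
 */
static boolean_t
example_prefer_special_class(uint64_t blocksize, const zio_prop_t *zp,
    boolean_t has_special_vdev)
{
	return (has_special_vdev && zp->zp_zpl_smallblk != 0 &&
	    blocksize <= zp->zp_zpl_smallblk);
}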
* Check if dnode is dirty
*/
for (i = 0; i < TXG_SIZE; i++) {
- if (list_link_active(&dn->dn_dirty_link[i])) {
+ if (multilist_link_active(&dn->dn_dirty_link[i])) {
clean = B_FALSE;
break;
}
abd_fini();
}
-#if defined(_KERNEL) && defined(HAVE_SPL)
+#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_bonus_hold);
+EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
-EXPORT_SYMBOL(dmu_free_long_range_raw);
EXPORT_SYMBOL(dmu_free_long_object);
-EXPORT_SYMBOL(dmu_free_long_object_raw);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);