/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>
#include <sys/trace_dmu.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
        { "dmu_tx_assigned",            KSTAT_DATA_UINT64 },
        { "dmu_tx_delay",               KSTAT_DATA_UINT64 },
        { "dmu_tx_error",               KSTAT_DATA_UINT64 },
        { "dmu_tx_suspended",           KSTAT_DATA_UINT64 },
        { "dmu_tx_group",               KSTAT_DATA_UINT64 },
        { "dmu_tx_memory_reserve",      KSTAT_DATA_UINT64 },
        { "dmu_tx_memory_reclaim",      KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_throttle",      KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_delay",         KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_over_max",      KSTAT_DATA_UINT64 },
        { "dmu_tx_quota",               KSTAT_DATA_UINT64 },
};
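
/*
 * Note (informational, platform-dependent): these counters are published
 * through the kstat framework by dmu_tx_init() below; on Linux/SPL builds
 * they are typically readable from /proc/spl/kstat/zfs/dmu_tx.
 */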

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
        dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
        tx->tx_dir = dd;
        if (dd != NULL)
                tx->tx_pool = dd->dd_pool;
        list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
            offsetof(dmu_tx_hold_t, txh_node));
        list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
            offsetof(dmu_tx_callback_t, dcb_node));
        tx->tx_start = gethrtime();
#ifdef DEBUG_DMU_TX
        refcount_create(&tx->tx_space_written);
        refcount_create(&tx->tx_space_freed);
#endif
        return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
        dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
        tx->tx_objset = os;
        tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
        return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
        dmu_tx_t *tx = dmu_tx_create_dd(NULL);

        ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
        tx->tx_pool = dp;
        tx->tx_txg = txg;
        tx->tx_anyobj = TRUE;

        return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
        dmu_tx_hold_t *txh;
        dnode_t *dn = NULL;
        int err;

        if (object != DMU_NEW_OBJECT) {
                err = dnode_hold(os, object, tx, &dn);
                if (err) {
                        tx->tx_err = err;
                        return (NULL);
                }

                if (err == 0 && tx->tx_txg != 0) {
                        mutex_enter(&dn->dn_mtx);
                        /*
                         * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
                         * problem, but there's no way for it to happen (for
                         * now, at least).
                         */
                        ASSERT(dn->dn_assigned_txg == 0);
                        dn->dn_assigned_txg = tx->tx_txg;
                        (void) refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
        }

        txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
#ifdef DEBUG_DMU_TX
        txh->txh_type = type;
        txh->txh_arg1 = arg1;
        txh->txh_arg2 = arg2;
#endif
        list_insert_tail(&tx->tx_holds, txh);

        return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
        /*
         * If we're syncing, they can manipulate any object anyhow, and
         * the hold on the dnode_t can cause problems.
         */
        if (!dmu_tx_is_syncing(tx)) {
                (void) dmu_tx_hold_object_impl(tx, os,
                    object, THT_NEWOBJECT, 0, 0);
        }
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
        int err;
        dmu_buf_impl_t *db;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        db = dbuf_hold_level(dn, level, blkid, FTAG);
        rw_exit(&dn->dn_struct_rwlock);
        if (db == NULL)
                return (SET_ERROR(EIO));
        err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
        dbuf_rele(db, FTAG);
        return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
        objset_t *os = dn->dn_objset;
        dsl_dataset_t *ds = os->os_dsl_dataset;
        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        dmu_buf_impl_t *parent = NULL;
        blkptr_t *bp = NULL;
        uint64_t space;

        if (level >= dn->dn_nlevels || history[level] == blkid)
                return;

        history[level] = blkid;

        space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

        if (db == NULL || db == dn->dn_dbuf) {
                ASSERT(level != 0);
                db = NULL;
        } else {
                ASSERT(DB_DNODE(db) == dn);
                ASSERT(db->db_level == level);
                ASSERT(db->db.db_size == space);
                ASSERT(db->db_blkid == blkid);
                bp = db->db_blkptr;
                parent = db->db_parent;
        }

        freeable = (bp && (freeable ||
            dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

        if (freeable)
                txh->txh_space_tooverwrite += space;
        else
                txh->txh_space_towrite += space;
        if (bp)
                txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

        dmu_tx_count_twig(txh, dn, parent, level + 1,
            blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dnode_t *dn = txh->txh_dnode;
        uint64_t start, end, i;
        int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
        int err = 0;
        int l;

        if (len == 0)
                return;

        min_bs = SPA_MINBLOCKSHIFT;
        max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
        min_ibs = DN_MIN_INDBLKSHIFT;
        max_ibs = DN_MAX_INDBLKSHIFT;

        if (dn) {
                uint64_t history[DN_MAX_LEVELS];
                int nlvls = dn->dn_nlevels;
                int delta;

                /*
                 * For i/o error checking, read the first and last level-0
                 * blocks (if they are not aligned), and all the level-1
                 * blocks.
                 */
                if (dn->dn_maxblkid == 0) {
                        delta = dn->dn_datablksz;
                        start = (off < dn->dn_datablksz) ? 0 : 1;
                        end = (off+len <= dn->dn_datablksz) ? 0 : 1;
                        if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
                                err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                                if (err)
                                        goto out;
                                delta -= off;
                        }
                } else {
                        zio_t *zio = zio_root(dn->dn_objset->os_spa,
                            NULL, NULL, ZIO_FLAG_CANFAIL);

                        /* first level-0 block */
                        start = off >> dn->dn_datablkshift;
                        if (P2PHASE(off, dn->dn_datablksz) ||
                            len < dn->dn_datablksz) {
                                err = dmu_tx_check_ioerr(zio, dn, 0, start);
                                if (err)
                                        goto out;
                        }

                        /* last level-0 block */
                        end = (off+len-1) >> dn->dn_datablkshift;
                        if (end != start && end <= dn->dn_maxblkid &&
                            P2PHASE(off+len, dn->dn_datablksz)) {
                                err = dmu_tx_check_ioerr(zio, dn, 0, end);
                                if (err)
                                        goto out;
                        }

                        /* level-1 blocks */
                        if (nlvls > 1) {
                                int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                                for (i = (start>>shft)+1; i < end>>shft; i++) {
                                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
                                        if (err)
                                                goto out;
                                }
                        }

                        err = zio_wait(zio);
                        if (err)
                                goto out;
                        delta = P2NPHASE(off, dn->dn_datablksz);
                }

                min_ibs = max_ibs = dn->dn_indblkshift;
                if (dn->dn_maxblkid > 0) {
                        /*
                         * The blocksize can't change,
                         * so we can make a more precise estimate.
                         */
                        ASSERT(dn->dn_datablkshift != 0);
                        min_bs = max_bs = dn->dn_datablkshift;
                } else {
                        /*
                         * The blocksize can increase up to the recordsize,
                         * or if it is already more than the recordsize,
                         * up to the next power of 2.
                         */
                        min_bs = highbit64(dn->dn_datablksz - 1);
                        max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
                }

                /*
                 * If this write is not off the end of the file
                 * we need to account for overwrites/unref.
                 */
                if (start <= dn->dn_maxblkid) {
                        for (l = 0; l < DN_MAX_LEVELS; l++)
                                history[l] = -1ULL;
                }
                while (start <= dn->dn_maxblkid) {
                        dmu_buf_impl_t *db;

                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        err = dbuf_hold_impl(dn, 0, start,
                            FALSE, FALSE, FTAG, &db);
                        rw_exit(&dn->dn_struct_rwlock);

                        if (err) {
                                txh->txh_tx->tx_err = err;
                                return;
                        }

                        dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
                            history);
                        dbuf_rele(db, FTAG);
                        if (++start > end) {
                                /*
                                 * Account for new indirects appearing
                                 * before this IO gets assigned into a txg.
                                 */
                                bits = 64 - min_bs;
                                epbs = min_ibs - SPA_BLKPTRSHIFT;
                                for (bits -= epbs * (nlvls - 1);
                                    bits >= 0; bits -= epbs)
                                        txh->txh_fudge += 1ULL << max_ibs;
                                goto out;
                        }
                        off += delta;
                        if (len >= delta)
                                len -= delta;
                        delta = dn->dn_datablksz;
                }
        }

        /*
         * 'end' is the last thing we will access, not one past.
         * This way we won't overflow when accessing the last byte.
         */
        start = P2ALIGN(off, 1ULL << max_bs);
        end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
        txh->txh_space_towrite += end - start + 1;

        start >>= min_bs;
        end >>= min_bs;

        epbs = min_ibs - SPA_BLKPTRSHIFT;

        /*
         * The object contains at most 2^(64 - min_bs) blocks,
         * and each indirect level maps 2^epbs.
         */
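        /*
         * Illustrative numbers: with 16K indirect blocks (indblkshift = 14)
         * and SPA_BLKPTRSHIFT = 7 (128-byte block pointers), epbs = 7, so
         * each indirect block maps 2^7 = 128 children and three indirect
         * levels together cover 128^3 = 2^21 data blocks.
         */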
        for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
                start >>= epbs;
                end >>= epbs;
                ASSERT3U(end, >=, start);
                txh->txh_space_towrite += (end - start + 1) << max_ibs;
                if (start != 0) {
                        /*
                         * We also need a new blkid=0 indirect block
                         * to reference any existing file data.
                         */
                        txh->txh_space_towrite += 1ULL << max_ibs;
                }
        }

out:
        if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
            2 * DMU_MAX_ACCESS)
                err = SET_ERROR(EFBIG);

        if (err)
                txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
        dnode_t *dn = txh->txh_dnode;
        dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
        uint64_t space = mdn->dn_datablksz +
            ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

        if (dn && dn->dn_dbuf->db_blkptr &&
            dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
            dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
                txh->txh_space_tooverwrite += space;
                txh->txh_space_tounref += space;
        } else {
                txh->txh_space_towrite += space;
                if (dn && dn->dn_dbuf->db_blkptr)
                        txh->txh_space_tounref += space;
        }
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);
        ASSERT(len <= DMU_MAX_ACCESS);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_WRITE, off, len);
        if (txh == NULL)
                return;

        dmu_tx_count_write(txh, off, len);
        dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        uint64_t blkid, nblks, lastblk;
        uint64_t space = 0, unref = 0, skipped = 0;
        dnode_t *dn = txh->txh_dnode;
        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
        spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
        int epbs;
        uint64_t l0span = 0, nl1blks = 0;

        if (dn->dn_nlevels == 0)
                return;

        /*
         * The struct_rwlock protects us against dn_nlevels
         * changing, in case (against all odds) we manage to dirty &
         * sync out the changes after we check for being dirty.
         * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
         */
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        if (dn->dn_maxblkid == 0) {
                if (off == 0 && len >= dn->dn_datablksz) {
                        blkid = 0;
                        nblks = 1;
                } else {
                        rw_exit(&dn->dn_struct_rwlock);
                        return;
                }
        } else {
                blkid = off >> dn->dn_datablkshift;
                nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

                if (blkid > dn->dn_maxblkid) {
                        rw_exit(&dn->dn_struct_rwlock);
                        return;
                }
                if (blkid + nblks > dn->dn_maxblkid)
                        nblks = dn->dn_maxblkid - blkid + 1;
        }
        l0span = nblks; /* save for later use to calc level > 1 overhead */
        if (dn->dn_nlevels == 1) {
                int i;
                for (i = 0; i < nblks; i++) {
                        blkptr_t *bp = dn->dn_phys->dn_blkptr;
                        ASSERT3U(blkid + i, <, dn->dn_nblkptr);
                        bp += blkid + i;
                        if (dsl_dataset_block_freeable(ds, bp,
                            bp->blk_birth)) {
                                dprintf_bp(bp, "can free old%s", "");
                                space += bp_get_dsize(spa, bp);
                        }
                        unref += BP_GET_ASIZE(bp);
                }
                nl1blks = 1;
                nblks = 0;
        }

        lastblk = blkid + nblks - 1;
        while (nblks) {
                dmu_buf_impl_t *dbuf;
                uint64_t ibyte, new_blkid;
                int epb = 1 << epbs;
                int err, i, blkoff, tochk;
                blkptr_t *bp;

                ibyte = blkid << dn->dn_datablkshift;
                err = dnode_next_offset(dn,
                    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
                new_blkid = ibyte >> dn->dn_datablkshift;
                if (err == ESRCH) {
                        skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
                        break;
                }
                if (err) {
                        txh->txh_tx->tx_err = err;
                        break;
                }
                if (new_blkid > lastblk) {
                        skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
                        break;
                }

                if (new_blkid > blkid) {
                        ASSERT((new_blkid >> epbs) > (blkid >> epbs));
                        skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
                        nblks -= new_blkid - blkid;
                        blkid = new_blkid;
                }
                blkoff = P2PHASE(blkid, epb);
                tochk = MIN(epb - blkoff, nblks);

                err = dbuf_hold_impl(dn, 1, blkid >> epbs,
                    FALSE, FALSE, FTAG, &dbuf);
                if (err) {
                        txh->txh_tx->tx_err = err;
                        break;
                }

                txh->txh_memory_tohold += dbuf->db.db_size;

                /*
                 * We don't check memory_tohold against DMU_MAX_ACCESS because
                 * memory_tohold is an over-estimation (especially the >L1
                 * indirect blocks), so it could fail.  Callers should have
                 * already verified that they will not be holding too much
                 * memory.
                 */

                err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
                if (err != 0) {
                        txh->txh_tx->tx_err = err;
                        dbuf_rele(dbuf, FTAG);
                        break;
                }

                bp = dbuf->db.db_data;
                bp += blkoff;

                for (i = 0; i < tochk; i++) {
                        if (dsl_dataset_block_freeable(ds, &bp[i],
                            bp[i].blk_birth)) {
                                dprintf_bp(&bp[i], "can free old%s", "");
                                space += bp_get_dsize(spa, &bp[i]);
                        }
                        unref += BP_GET_ASIZE(&bp[i]);
                }
                dbuf_rele(dbuf, FTAG);

                ++nl1blks;
                blkid += tochk;
                nblks -= tochk;
        }
        rw_exit(&dn->dn_struct_rwlock);

        /*
         * Add in memory requirements of higher-level indirects.
         * This assumes a worst-possible scenario for dn_nlevels and a
         * worst-possible distribution of l1-blocks over the region to free.
         */
        {
                uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
                int level = 2;
                /*
                 * Here we don't use DN_MAX_LEVEL, but calculate it with the
                 * given datablkshift and indblkshift. This makes the
                 * difference between 19 and 8 on large files.
                 */
                int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
                    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

                while (level++ < maxlevel) {
                        txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
                            << dn->dn_indblkshift;
                        blkcnt = 1 + (blkcnt >> epbs);
                }
        }

        /* account for new level 1 indirect blocks that might show up */
        if (skipped > 0) {
                txh->txh_fudge += skipped << dn->dn_indblkshift;
                skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
                txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
        }
        txh->txh_space_tofree += space;
        txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            DMU_NEW_OBJECT, THT_FREE, 0, 0);

        /*
         * Pretend that this operation will free 1GB of space.  This
         * should be large enough to cancel out the largest write.
         * We don't want to use something like UINT64_MAX, because that would
         * cause overflows when doing math with these values (e.g. in
         * dmu_tx_try_assign()).
         */
        txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}
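
/*
 * Sketch of a typical net-free caller (illustrative only; real callers such
 * as object-destroy paths live outside this file):
 *
 *      tx = dmu_tx_create(os);
 *      dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *      dmu_tx_mark_netfree(tx);
 *      err = dmu_tx_assign(tx, TXG_WAIT);
 */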

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
        dmu_tx_hold_t *txh;
        dnode_t *dn;
        int err;
        zio_t *zio;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_FREE, off, len);
        if (txh == NULL)
                return;
        dn = txh->txh_dnode;
        dmu_tx_count_dnode(txh);

        if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
                return;
        if (len == DMU_OBJECT_END)
                len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

        /*
         * For i/o error checking, we read the first and last level-0
         * blocks if they are not aligned, and all the level-1 blocks.
         *
         * Note:  dbuf_free_range() assumes that we have not instantiated
         * any level-0 dbufs that will be completely freed.  Therefore we must
         * exercise care to not read or count the first and last blocks
         * if they are blocksize-aligned.
         */
        if (dn->dn_datablkshift == 0) {
                if (off != 0 || len < dn->dn_datablksz)
                        dmu_tx_count_write(txh, 0, dn->dn_datablksz);
        } else {
                /* first block will be modified if it is not aligned */
                if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
                        dmu_tx_count_write(txh, off, 1);
                /* last block will be modified if it is not aligned */
                if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
                        dmu_tx_count_write(txh, off+len, 1);
        }

        /*
         * Check level-1 blocks.
         */
        if (dn->dn_nlevels > 1) {
                int shift = dn->dn_datablkshift + dn->dn_indblkshift -
                    SPA_BLKPTRSHIFT;
                uint64_t start = off >> shift;
                uint64_t end = (off + len) >> shift;
                uint64_t i;

                ASSERT(dn->dn_indblkshift != 0);

                /*
                 * dnode_reallocate() can result in an object with indirect
                 * blocks having an odd data block size.  In this case,
                 * just check the single block.
                 */
                if (dn->dn_datablkshift == 0)
                        start = end = 0;

                zio = zio_root(tx->tx_pool->dp_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);
                for (i = start; i <= end; i++) {
                        uint64_t ibyte = i << shift;
                        err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
                        i = ibyte >> shift;
                        if (err == ESRCH || i > end)
                                break;
                        if (err) {
                                tx->tx_err = err;
                                return;
                        }

                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
                        if (err) {
                                tx->tx_err = err;
                                return;
                        }
                }
                err = zio_wait(zio);
                if (err) {
                        tx->tx_err = err;
                        return;
                }
        }

        dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
        dmu_tx_hold_t *txh;
        dnode_t *dn;
        dsl_dataset_phys_t *ds_phys;
        uint64_t nblocks;
        int epbs, err;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_ZAP, add, (uintptr_t)name);
        if (txh == NULL)
                return;
        dn = txh->txh_dnode;

        dmu_tx_count_dnode(txh);

        if (dn == NULL) {
                /*
                 * We will be able to fit a new object's entries into one leaf
                 * block.  So there will be at most 2 blocks total,
                 * including the header block.
                 */
                dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
                return;
        }

        ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

        if (dn->dn_maxblkid == 0 && !add) {
                blkptr_t *bp;

                /*
                 * If there is only one block (i.e. this is a micro-zap)
                 * and we are not adding anything, the accounting is simple.
                 */
                err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                if (err) {
                        tx->tx_err = err;
                        return;
                }

                /*
                 * Use max block size here, since we don't know how much
                 * the size will change between now and the dbuf dirty call.
                 */
                bp = &dn->dn_phys->dn_blkptr[0];
                if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
                    bp, bp->blk_birth))
                        txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
                else
                        txh->txh_space_towrite += MZAP_MAX_BLKSZ;
                if (!BP_IS_HOLE(bp))
                        txh->txh_space_tounref += MZAP_MAX_BLKSZ;
                return;
        }

        if (dn->dn_maxblkid > 0 && name) {
                /*
                 * access the name in this fat-zap so that we'll check
                 * for i/o errors to the leaf blocks, etc.
                 */
                err = zap_lookup(dn->dn_objset, dn->dn_object, name,
                    8, 0, NULL);
                if (err == EIO) {
                        tx->tx_err = err;
                        return;
                }
        }

        err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
            &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

        /*
         * If the modified blocks are scattered to the four winds,
         * we'll have to modify an indirect twig for each.
         */
        epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
        for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
                if (ds_phys->ds_prev_snap_obj)
                        txh->txh_space_towrite += 3 << dn->dn_indblkshift;
                else
                        txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_BONUS, 0, 0);
        if (txh)
                dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            DMU_NEW_OBJECT, THT_SPACE, space, 0);
        if (txh)
                txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;
        int holds = 0;

        /*
         * By asserting that the tx is assigned, we're counting the
         * number of dn_tx_holds, which is the same as the number of
         * dn_holds.  Otherwise, we'd be counting dn_holds, but
         * dn_tx_holds could be 0.
         */
        ASSERT(tx->tx_txg != 0);

        /* if (tx->tx_anyobj == TRUE) */
                /* return (0); */

        for (txh = list_head(&tx->tx_holds); txh;
            txh = list_next(&tx->tx_holds, txh)) {
                if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
                        holds++;
        }

        return (holds);
}

#ifdef DEBUG_DMU_TX
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
        dmu_tx_hold_t *txh;
        int match_object = FALSE, match_offset = FALSE;
        dnode_t *dn;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        ASSERT(dn != NULL);
        ASSERT(tx->tx_txg != 0);
        ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
        ASSERT3U(dn->dn_object, ==, db->db.db_object);

        if (tx->tx_anyobj) {
                DB_DNODE_EXIT(db);
                return;
        }

        /* XXX No checking on the meta dnode for now */
        if (db->db.db_object == DMU_META_DNODE_OBJECT) {
                DB_DNODE_EXIT(db);
                return;
        }

        for (txh = list_head(&tx->tx_holds); txh;
            txh = list_next(&tx->tx_holds, txh)) {
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
                        match_object = TRUE;
                if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
                        int datablkshift = dn->dn_datablkshift ?
                            dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
                        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                        int shift = datablkshift + epbs * db->db_level;
                        uint64_t beginblk = shift >= 64 ? 0 :
                            (txh->txh_arg1 >> shift);
                        uint64_t endblk = shift >= 64 ? 0 :
                            ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
                        uint64_t blkid = db->db_blkid;

                        /* XXX txh_arg2 better not be zero... */

                        dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
                            txh->txh_type, beginblk, endblk);

                        switch (txh->txh_type) {
                        case THT_WRITE:
                                if (blkid >= beginblk && blkid <= endblk)
                                        match_offset = TRUE;
                                /*
                                 * We will let this hold work for the bonus
                                 * or spill buffer so that we don't need to
                                 * hold it when creating a new object.
                                 */
                                if (blkid == DMU_BONUS_BLKID ||
                                    blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs.  Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        case THT_FREE:
                                /*
                                 * We will dirty all the level 1 blocks in
                                 * the free range and perhaps the first and
                                 * last level 0 block.
                                 */
                                if (blkid >= beginblk && (blkid <= endblk ||
                                    txh->txh_arg2 == DMU_OBJECT_END))
                                        match_offset = TRUE;
                                break;
                        case THT_SPILL:
                                if (blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_BONUS:
                                if (blkid == DMU_BONUS_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_ZAP:
                                match_offset = TRUE;
                                break;
                        case THT_NEWOBJECT:
                                match_object = TRUE;
                                break;
                        default:
                                cmn_err(CE_PANIC, "bad txh_type %d",
                                    txh->txh_type);
                        }
                }
                if (match_object && match_offset) {
                        DB_DNODE_EXIT(db);
                        return;
                }
        }
        DB_DNODE_EXIT(db);
        panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
            (u_longlong_t)db->db.db_object, db->db_level,
            (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.
 * Roughly speaking, this variable determines the amount of delay at the
 * midpoint of the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint)  *   +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                    *                         +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
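
/*
 * Worked example (illustrative values; the actual tunables may differ):
 * with zfs_dirty_data_max = 4GB, zfs_delay_min_dirty_percent = 60, and
 * zfs_delay_scale = 500,000ns, delay begins at 2.4GB of dirty data.  At
 * dirty = 3.2GB, the midpoint of the delay range:
 *
 *      min_time = 500000 * (3.2G - 2.4G) / (4G - 3.2G) = 500000ns = 500us
 *
 * which is the 500us midpoint delay shown in the curves above.
 */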
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
        dsl_pool_t *dp = tx->tx_pool;
        uint64_t delay_min_bytes =
            zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
        hrtime_t wakeup, min_tx_time, now;

        if (dirty <= delay_min_bytes)
                return;

        /*
         * The caller has already waited until we are under the max.
         * We make them pass us the amount of dirty data so we don't
         * have to handle the case of it being >= the max, which could
         * cause a divide-by-zero if it's == the max.
         */
        ASSERT3U(dirty, <, zfs_dirty_data_max);

        now = gethrtime();
        min_tx_time = zfs_delay_scale *
            (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
        min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
        if (now > tx->tx_start + min_tx_time)
                return;

        DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
            uint64_t, min_tx_time);

        mutex_enter(&dp->dp_lock);
        wakeup = MAX(tx->tx_start + min_tx_time,
            dp->dp_last_wakeup + min_tx_time);
        dp->dp_last_wakeup = wakeup;
        mutex_exit(&dp->dp_lock);

        zfs_sleep_until(wakeup);
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
        dmu_tx_hold_t *txh;
        spa_t *spa = tx->tx_pool->dp_spa;
        uint64_t memory, asize, fsize, usize;
        uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

        ASSERT0(tx->tx_txg);

        if (tx->tx_err) {
                DMU_TX_STAT_BUMP(dmu_tx_error);
                return (tx->tx_err);
        }

        if (spa_suspended(spa)) {
                DMU_TX_STAT_BUMP(dmu_tx_suspended);

                /*
                 * If the user has indicated a blocking failure mode
                 * then return ERESTART which will block in dmu_tx_wait().
                 * Otherwise, return EIO so that an error can get
                 * propagated back to the VOP calls.
                 *
                 * Note that we always honor the txg_how flag regardless
                 * of the failuremode setting.
                 */
                if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
                    txg_how != TXG_WAIT)
                        return (SET_ERROR(EIO));

                return (SET_ERROR(ERESTART));
        }

        if (!tx->tx_waited &&
            dsl_pool_need_dirty_delay(tx->tx_pool)) {
                tx->tx_wait_dirty = B_TRUE;
                DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
                return (ERESTART);
        }

        tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
        tx->tx_needassign_txh = NULL;

        /*
         * NB: No error returns are allowed after txg_hold_open, but
         * before processing the dnode holds, due to the
         * dmu_tx_unassign() logic.
         */

        towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
        for (txh = list_head(&tx->tx_holds); txh;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;
                if (dn != NULL) {
                        mutex_enter(&dn->dn_mtx);
                        if (dn->dn_assigned_txg == tx->tx_txg - 1) {
                                mutex_exit(&dn->dn_mtx);
                                tx->tx_needassign_txh = txh;
                                DMU_TX_STAT_BUMP(dmu_tx_group);
                                return (SET_ERROR(ERESTART));
                        }
                        if (dn->dn_assigned_txg == 0)
                                dn->dn_assigned_txg = tx->tx_txg;
                        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                        (void) refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
                towrite += txh->txh_space_towrite;
                tofree += txh->txh_space_tofree;
                tooverwrite += txh->txh_space_tooverwrite;
                tounref += txh->txh_space_tounref;
                tohold += txh->txh_memory_tohold;
                fudge += txh->txh_fudge;
        }

        /*
         * If a snapshot has been taken since we made our estimates,
         * assume that we won't be able to free or overwrite anything.
         */
        if (tx->tx_objset &&
            dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
            tx->tx_lastsnap_txg) {
                towrite += tooverwrite;
                tooverwrite = tofree = 0;
        }

        /* needed allocation: worst-case estimate of write space */
        asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
        /* freed space estimate: worst-case overwrite + free estimate */
        fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
        /* convert unrefd space to worst-case estimate */
        usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
        /* calculate memory footprint estimate */
        memory = towrite + tooverwrite + tohold;

#ifdef DEBUG_DMU_TX
        /*
         * Add in 'tohold' to account for our dirty holds on this memory
         * XXX - the "fudge" factor is to account for skipped blocks that
         * we missed because dnode_next_offset() misses in-core-only blocks.
         */
        tx->tx_space_towrite = asize +
            spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
        tx->tx_space_tofree = tofree;
        tx->tx_space_tooverwrite = tooverwrite;
        tx->tx_space_tounref = tounref;
#endif

        if (tx->tx_dir && asize != 0) {
                int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
                    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
                if (err)
                        return (err);
        }

        DMU_TX_STAT_BUMP(dmu_tx_assigned);

        return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        if (tx->tx_txg == 0)
                return;

        txg_rele_to_quiesce(&tx->tx_txgh);

        /*
         * Walk the transaction's hold list, removing the hold on the
         * associated dnode, and notifying waiters if the refcount drops to 0.
         */
        for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;

                if (dn == NULL)
                        continue;
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
        }

        txg_rele_to_sync(&tx->tx_txgh);

        tx->tx_lasttried_txg = tx->tx_txg;
        tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)  TXG_WAIT.  If the current open txg is full, waits until there's
 *      a new one.  This should be used when you're not holding locks.
 *      It will only fail if we're truly out of space (or over quota).
 *
 * (2)  TXG_NOWAIT.  If we can't assign into the current open txg without
 *      blocking, returns immediately with ERESTART.  This should be used
 *      whenever you're holding locks.  On an ERESTART error, the caller
 *      should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)  TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *      has already been called on behalf of this operation (though
 *      most likely on a different tx).
 */
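
/*
 * Sketch of the canonical TXG_NOWAIT retry loop (illustrative only; real
 * callers live outside this file):
 *
 *      tx = dmu_tx_create(os);
 *      dmu_tx_hold_write(tx, object, off, len);
 *      err = dmu_tx_assign(tx, TXG_NOWAIT);
 *      if (err == ERESTART) {
 *              dmu_tx_wait(tx);
 *              dmu_tx_abort(tx);
 *              ... drop locks, then retry from dmu_tx_create() ...
 *      } else if (err != 0) {
 *              dmu_tx_abort(tx);
 *              return (err);
 *      }
 *      ... make the on-disk changes under this tx ...
 *      dmu_tx_commit(tx);
 */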
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
        int err;

        ASSERT(tx->tx_txg == 0);
        ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
            txg_how == TXG_WAITED);
        ASSERT(!dsl_pool_sync_context(tx->tx_pool));

        if (txg_how == TXG_WAITED)
                tx->tx_waited = B_TRUE;

        /* If we might wait, we must not hold the config lock. */
        ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

        while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
                dmu_tx_unassign(tx);

                if (err != ERESTART || txg_how != TXG_WAIT)
                        return (err);

                dmu_tx_wait(tx);
        }

        txg_rele_to_quiesce(&tx->tx_txgh);

        return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
        spa_t *spa = tx->tx_pool->dp_spa;
        dsl_pool_t *dp = tx->tx_pool;
        hrtime_t before;

        ASSERT(tx->tx_txg == 0);
        ASSERT(!dsl_pool_config_held(tx->tx_pool));

        before = gethrtime();

        if (tx->tx_wait_dirty) {
                uint64_t dirty;

                /*
                 * dmu_tx_try_assign() has determined that we need to wait
                 * because we've consumed much or all of the dirty buffer
                 * space.
                 */
                mutex_enter(&dp->dp_lock);
                if (dp->dp_dirty_total >= zfs_dirty_data_max)
                        DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
                while (dp->dp_dirty_total >= zfs_dirty_data_max)
                        cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
                dirty = dp->dp_dirty_total;
                mutex_exit(&dp->dp_lock);

                dmu_tx_delay(tx, dirty);

                tx->tx_wait_dirty = B_FALSE;

                /*
                 * Note: setting tx_waited only has effect if the caller
                 * used TXG_WAIT.  Otherwise they are going to destroy
                 * this tx and try again.  The common case, zfs_write(),
                 * uses TXG_WAIT.
                 */
                tx->tx_waited = B_TRUE;
        } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
                /*
                 * If the pool is suspended we need to wait until it
                 * is resumed.  Note that it's possible that the pool
                 * has become active after this thread has tried to
                 * obtain a tx.  If that's the case then tx_lasttried_txg
                 * would not have been set.
                 */
                txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
        } else if (tx->tx_needassign_txh) {
                dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

                mutex_enter(&dn->dn_mtx);
                while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
                        cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
                mutex_exit(&dn->dn_mtx);
                tx->tx_needassign_txh = NULL;
        } else {
                /*
                 * A dnode is assigned to the quiescing txg.  Wait for its
                 * transaction to complete.
                 */
                txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
        }

        spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef DEBUG_DMU_TX
        if (tx->tx_dir == NULL || delta == 0)
                return;

        if (delta > 0) {
                ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
                    tx->tx_space_towrite);
                (void) refcount_add_many(&tx->tx_space_written, delta, NULL);
        } else {
                (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
        }
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg != 0);

        /*
         * Go through the transaction's hold list and remove holds on
         * associated dnodes, notifying waiters if no holds remain.
         */
        while ((txh = list_head(&tx->tx_holds))) {
                dnode_t *dn = txh->txh_dnode;

                list_remove(&tx->tx_holds, txh);
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn == NULL)
                        continue;
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
                dnode_rele(dn, tx);
        }

        if (tx->tx_tempreserve_cookie)
                dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

        if (!list_is_empty(&tx->tx_callbacks))
                txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

        if (tx->tx_anyobj == FALSE)
                txg_rele_to_sync(&tx->tx_txgh);

        list_destroy(&tx->tx_callbacks);
        list_destroy(&tx->tx_holds);
#ifdef DEBUG_DMU_TX
        dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
            tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
            tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
        refcount_destroy_many(&tx->tx_space_written,
            refcount_count(&tx->tx_space_written));
        refcount_destroy_many(&tx->tx_space_freed,
            refcount_count(&tx->tx_space_freed));
#endif
        kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        while ((txh = list_head(&tx->tx_holds))) {
                dnode_t *dn = txh->txh_dnode;

                list_remove(&tx->tx_holds, txh);
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn != NULL)
                        dnode_rele(dn, tx);
        }

        /*
         * Call any registered callbacks with an error code.
         */
        if (!list_is_empty(&tx->tx_callbacks))
                dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

        list_destroy(&tx->tx_callbacks);
        list_destroy(&tx->tx_holds);
#ifdef DEBUG_DMU_TX
        refcount_destroy_many(&tx->tx_space_written,
            refcount_count(&tx->tx_space_written));
        refcount_destroy_many(&tx->tx_space_freed,
            refcount_count(&tx->tx_space_freed));
#endif
        kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg != 0);
        return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
        ASSERT(tx->tx_pool != NULL);
        return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
        dmu_tx_callback_t *dcb;

        dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

        dcb->dcb_func = func;
        dcb->dcb_data = data;

        list_insert_tail(&tx->tx_callbacks, dcb);
}
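
/*
 * Illustrative registration of a commit callback (my_commit_cb is a
 * hypothetical function, not part of this file).  Callbacks run once the
 * txg containing the tx reaches stable storage, or with ECANCELED if the
 * tx is aborted:
 *
 *      static void
 *      my_commit_cb(void *data, int error)
 *      {
 *              if (error == 0)
 *                      ... data for this tx is now on disk ...
 *      }
 *
 *      dmu_tx_callback_register(tx, my_commit_cb, my_data);
 */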

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
        dmu_tx_callback_t *dcb;

        while ((dcb = list_head(cb_list))) {
                list_remove(cb_list, dcb);
                dcb->dcb_func(dcb->dcb_data, error);
                kmem_free(dcb, sizeof (dmu_tx_callback_t));
        }
}

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
        int i;

        if (!sa->sa_need_attr_registration)
                return;

        for (i = 0; i != sa->sa_num_attrs; i++) {
                if (!sa->sa_attr_table[i].sa_registered) {
                        if (sa->sa_reg_attr_obj)
                                dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                        else
                                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                }
        }
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
        dnode_t *dn;
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
            THT_SPILL, 0, 0);
        if (txh == NULL)
                return;

        dn = txh->txh_dnode;

        if (dn == NULL)
                return;

        /* If blkptr doesn't exist then add space to towrite */
        if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
                txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
        } else {
                blkptr_t *bp;

                bp = DN_SPILL_BLKPTR(dn->dn_phys);
                if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
                    bp, bp->blk_birth))
                        txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
                else
                        txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
                if (!BP_IS_HOLE(bp))
                        txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
        }
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
        sa_os_t *sa = tx->tx_objset->os_sa;

        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
        } else {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
                return;

        (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
            THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the total size of the attributes may increase,
 * in which case the layout attribute ZAP and, if necessary, the spill
 * block are held as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
        uint64_t object;
        sa_os_t *sa = tx->tx_objset->os_sa;

        ASSERT(hdl != NULL);

        object = sa_handle_object(hdl);

        dmu_tx_hold_bonus(tx, object);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
            tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

        if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
                ASSERT(tx->tx_txg == 0);
                dmu_tx_hold_spill(tx, object);
        } else {
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
                dnode_t *dn;

                DB_DNODE_ENTER(db);
                dn = DB_DNODE(db);
                if (dn->dn_have_spill) {
                        ASSERT(tx->tx_txg == 0);
                        dmu_tx_hold_spill(tx, object);
                }
                DB_DNODE_EXIT(db);
        }
}
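
/*
 * Illustrative update of an existing fixed-size attribute (hypothetical
 * caller; ZPL-style code lives outside this file).  B_FALSE indicates the
 * attribute will not grow:
 *
 *      tx = dmu_tx_create(os);
 *      dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *      if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *              VERIFY0(sa_update(hdl, attr, &value, sizeof (value), tx));
 *              dmu_tx_commit(tx);
 *      }
 */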

void
dmu_tx_init(void)
{
        dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
            KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);

        if (dmu_tx_ksp != NULL) {
                dmu_tx_ksp->ks_data = &dmu_tx_stats;
                kstat_install(dmu_tx_ksp);
        }
}

void
dmu_tx_fini(void)
{
        if (dmu_tx_ksp != NULL) {
                kstat_delete(dmu_tx_ksp);
                dmu_tx_ksp = NULL;
        }
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif