/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned", KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay", KSTAT_DATA_UINT64 },
	{ "dmu_tx_error", KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended", KSTAT_DATA_UINT64 },
	{ "dmu_tx_group", KSTAT_DATA_UINT64 },
	{ "dmu_tx_how", KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve", KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim", KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_inflight", KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle", KSTAT_DATA_UINT64 },
	{ "dmu_tx_write_limit", KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota", KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;
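
/*
 * Note: these counters are exported via the "dmu_tx" kstat created in
 * dmu_tx_init() below; on Linux builds they are typically readable at
 * /proc/spl/kstat/zfs/dmu_tx.
 */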

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef DEBUG_DMU_TX
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef DEBUG_DMU_TX
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;
	int l;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1
		 * blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
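	/*
	 * Illustrative arithmetic (assuming current on-disk constants):
	 * with 128K data blocks (min_bs = 17) and 16K indirect blocks
	 * (min_ibs = 14, so epbs = 7), the loop below runs for
	 * bits = 47, 40, ..., 5, charging the spanned indirect blocks
	 * at each of up to seven levels of indirection.
	 */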
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
	/*	return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef DEBUG_DMU_TX
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(dn != NULL);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly.
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg) {
		DMU_TX_STAT_BUMP(dmu_tx_how);
		return (ERESTART);
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef DEBUG_DMU_TX
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory.
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
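/*
 * A sketch of the typical TXG_NOWAIT consumer pattern (illustrative
 * only; the specific holds taken vary by caller):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... drop locks and retry from dmu_tx_create() ...
 *	} else if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... dirty the held buffers ...
 *	dmu_tx_commit(tx);
 */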
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef DEBUG_DMU_TX
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef DEBUG_DMU_TX
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef DEBUG_DMU_TX
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list))) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
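
/*
 * Illustrative sketch (not part of this file): a consumer registers a
 * callback before committing; the callback later runs with error == 0
 * once the txg syncs, or with a non-zero code (e.g. ECANCELED from
 * dmu_tx_abort() above) if the transaction never makes it to disk:
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_state);
 *	dmu_tx_commit(tx);
 */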

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */
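
/*
 * Sketch of the create-time pattern (illustrative only; the exact holds
 * a caller takes depend on the filesystem operation):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa_create(tx, total_attr_size);
 *	dmu_tx_hold_zap(tx, parent_dir_object, B_TRUE, name);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	...
 */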

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the object's SA layout may change (for
 * example, because a variable-sized attribute is growing), in which
 * case the layout ZAP and possibly the spill block must also be held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif