/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_how",			KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_inflight",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_write_limit",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			int l;
			for (l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

*tx
, dmu_buf_impl_t
*db
)
809 int match_object
= FALSE
, match_offset
= FALSE
;
815 ASSERT(tx
->tx_txg
!= 0);
816 ASSERT(tx
->tx_objset
== NULL
|| dn
->dn_objset
== tx
->tx_objset
);
817 ASSERT3U(dn
->dn_object
, ==, db
->db
.db_object
);
824 /* XXX No checking on the meta dnode for now */
825 if (db
->db
.db_object
== DMU_META_DNODE_OBJECT
) {
830 for (txh
= list_head(&tx
->tx_holds
); txh
;
831 txh
= list_next(&tx
->tx_holds
, txh
)) {
832 ASSERT3U(dn
->dn_assigned_txg
, ==, tx
->tx_txg
);
833 if (txh
->txh_dnode
== dn
&& txh
->txh_type
!= THT_NEWOBJECT
)
835 if (txh
->txh_dnode
== NULL
|| txh
->txh_dnode
== dn
) {
836 int datablkshift
= dn
->dn_datablkshift
?
837 dn
->dn_datablkshift
: SPA_MAXBLOCKSHIFT
;
838 int epbs
= dn
->dn_indblkshift
- SPA_BLKPTRSHIFT
;
839 int shift
= datablkshift
+ epbs
* db
->db_level
;
840 uint64_t beginblk
= shift
>= 64 ? 0 :
841 (txh
->txh_arg1
>> shift
);
842 uint64_t endblk
= shift
>= 64 ? 0 :
843 ((txh
->txh_arg1
+ txh
->txh_arg2
- 1) >> shift
);
844 uint64_t blkid
= db
->db_blkid
;
846 /* XXX txh_arg2 better not be zero... */
848 dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
849 txh
->txh_type
, beginblk
, endblk
);
851 switch (txh
->txh_type
) {
853 if (blkid
>= beginblk
&& blkid
<= endblk
)
856 * We will let this hold work for the bonus
857 * or spill buffer so that we don't need to
858 * hold it when creating a new object.
860 if (blkid
== DMU_BONUS_BLKID
||
861 blkid
== DMU_SPILL_BLKID
)
864 * They might have to increase nlevels,
865 * thus dirtying the new TLIBs. Or the
866 * might have to change the block size,
867 * thus dirying the new lvl=0 blk=0.
874 * We will dirty all the level 1 blocks in
875 * the free range and perhaps the first and
876 * last level 0 block.
878 if (blkid
>= beginblk
&& (blkid
<= endblk
||
879 txh
->txh_arg2
== DMU_OBJECT_END
))
883 if (blkid
== DMU_SPILL_BLKID
)
887 if (blkid
== DMU_BONUS_BLKID
)
897 ASSERT(!"bad txh_type");
900 if (match_object
&& match_offset
) {
906 panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
907 (u_longlong_t
)db
->db
.db_object
, db
->db_level
,
908 (u_longlong_t
)db
->db_blkid
);
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly.
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg) {
		DMU_TX_STAT_BUMP(dmu_tx_how);
		return (ERESTART);
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

	/*
	 * Add in 'tohold' to account for our dirty holds on this memory.
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

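/*
 * Illustrative sketch, not part of the original file: the typical
 * consumer pattern for the assignment interface above, assuming a
 * hypothetical objset "os", object number "object", and range
 * "off"/"len".  Per the comment above dmu_tx_assign(), a TXG_NOWAIT
 * caller drops its locks and retries on ERESTART:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		(drop locks, then retry from dmu_tx_create())
 *	} else if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	(make the changes, e.g. via dmu_write(..., tx))
 *	dmu_tx_commit(tx);
 */
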
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);

	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);

	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list))) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

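/*
 * Illustrative sketch, not part of the original file: registering a
 * commit callback on an assigned tx.  The callback function and its
 * argument below are hypothetical.  Per dmu_tx_commit() and
 * dmu_tx_abort() above, the callback eventually fires with error == 0
 * once the txg has synced, or with ECANCELED if the tx is aborted:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)	(hypothetical)
 *	{
 *		if (error == 0)
 *			(the transaction is on stable storage)
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_arg);
 *	dmu_tx_commit(tx);
 */
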
/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;
	blkptr_t *bp;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

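/*
 * Illustrative sketch, not part of the original file: a create-time
 * caller passes the total size of all initial attributes, so the right
 * bonus vs. spill holds are taken before assignment.  "os" and
 * "attrs_total_size" are hypothetical:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa_create(tx, attrs_total_size);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 */
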
/*
 * Hold SA attribute.
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
 *
 * variable_size is the total size of all variable sized attributes
 * passed to this function.  It is not the total size of all
 * variable size attributes that *may* exist on this object.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif