mirror_zfs-debian.git: zfs/lib/libzpool/dmu_tx.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include <sys/dmu.h>
27 #include <sys/dmu_impl.h>
28 #include <sys/dbuf.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/dmu_objset.h>
31 #include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
32 #include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
33 #include <sys/dsl_pool.h>
34 #include <sys/zap_impl.h> /* for fzap_default_block_shift */
35 #include <sys/spa.h>
36 #include <sys/zfs_context.h>
37
38 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
39 uint64_t arg1, uint64_t arg2);
40
41
42 dmu_tx_t *
43 dmu_tx_create_dd(dsl_dir_t *dd)
44 {
45 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
46 tx->tx_dir = dd;
47 if (dd)
48 tx->tx_pool = dd->dd_pool;
49 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
50 offsetof(dmu_tx_hold_t, txh_node));
51 #ifdef ZFS_DEBUG
52 refcount_create(&tx->tx_space_written);
53 refcount_create(&tx->tx_space_freed);
54 #endif
55 return (tx);
56 }
57
58 dmu_tx_t *
59 dmu_tx_create(objset_t *os)
60 {
61 dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir);
62 tx->tx_objset = os;
63 tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset);
64 return (tx);
65 }
66
67 dmu_tx_t *
68 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
69 {
70 dmu_tx_t *tx = dmu_tx_create_dd(NULL);
71
72 ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
73 tx->tx_pool = dp;
74 tx->tx_txg = txg;
75 tx->tx_anyobj = TRUE;
76
77 return (tx);
78 }
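
/*
 * Example (illustrative sketch, not taken from this file): syncing
 * contexts never call dmu_tx_assign(); the pool sync code creates a
 * transaction that is already assigned to the txg being synced,
 * roughly:
 *
 *	tx = dmu_tx_create_assigned(dp, txg);
 *	... make sync-context changes against this tx ...
 *	dmu_tx_commit(tx);
 */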
79
80 int
81 dmu_tx_is_syncing(dmu_tx_t *tx)
82 {
83 return (tx->tx_anyobj);
84 }
85
86 int
87 dmu_tx_private_ok(dmu_tx_t *tx)
88 {
89 return (tx->tx_anyobj);
90 }
91
92 static dmu_tx_hold_t *
93 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
94 enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
95 {
96 dmu_tx_hold_t *txh;
97 dnode_t *dn = NULL;
98 int err;
99
100 if (object != DMU_NEW_OBJECT) {
101 err = dnode_hold(os->os, object, tx, &dn);
102 if (err) {
103 tx->tx_err = err;
104 return (NULL);
105 }
106
107 if (err == 0 && tx->tx_txg != 0) {
108 mutex_enter(&dn->dn_mtx);
109 /*
110 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
111 * problem, but there's no way for it to happen (for
112 * now, at least).
113 */
114 ASSERT(dn->dn_assigned_txg == 0);
115 dn->dn_assigned_txg = tx->tx_txg;
116 (void) refcount_add(&dn->dn_tx_holds, tx);
117 mutex_exit(&dn->dn_mtx);
118 }
119 }
120
121 txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
122 txh->txh_tx = tx;
123 txh->txh_dnode = dn;
124 #ifdef ZFS_DEBUG
125 txh->txh_type = type;
126 txh->txh_arg1 = arg1;
127 txh->txh_arg2 = arg2;
128 #endif
129 list_insert_tail(&tx->tx_holds, txh);
130
131 return (txh);
132 }
133
134 void
135 dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
136 {
137 /*
138 * If we're syncing, the caller can manipulate any object anyhow, and
139 * the hold on the dnode_t can cause problems.
140 */
141 if (!dmu_tx_is_syncing(tx)) {
142 (void) dmu_tx_hold_object_impl(tx, os,
143 object, THT_NEWOBJECT, 0, 0);
144 }
145 }
146
147 static int
148 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
149 {
150 int err;
151 dmu_buf_impl_t *db;
152
153 rw_enter(&dn->dn_struct_rwlock, RW_READER);
154 db = dbuf_hold_level(dn, level, blkid, FTAG);
155 rw_exit(&dn->dn_struct_rwlock);
156 if (db == NULL)
157 return (EIO);
158 err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
159 dbuf_rele(db, FTAG);
160 return (err);
161 }
162
163 /* ARGSUSED */
164 static void
165 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
166 {
167 dnode_t *dn = txh->txh_dnode;
168 uint64_t start, end, i;
169 int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
170 int err = 0;
171
172 if (len == 0)
173 return;
174
175 min_bs = SPA_MINBLOCKSHIFT;
176 max_bs = SPA_MAXBLOCKSHIFT;
177 min_ibs = DN_MIN_INDBLKSHIFT;
178 max_ibs = DN_MAX_INDBLKSHIFT;
179
180 /*
181 * For i/o error checking, read the first and last level-0
182 * blocks (if they are not aligned), and all the level-1 blocks.
183 */
184
185 if (dn) {
186 if (dn->dn_maxblkid == 0) {
187 if ((off > 0 || len < dn->dn_datablksz) &&
188 off < dn->dn_datablksz) {
189 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
190 if (err)
191 goto out;
192 }
193 } else {
194 zio_t *zio = zio_root(dn->dn_objset->os_spa,
195 NULL, NULL, ZIO_FLAG_CANFAIL);
196
197 /* first level-0 block */
198 start = off >> dn->dn_datablkshift;
199 if (P2PHASE(off, dn->dn_datablksz) ||
200 len < dn->dn_datablksz) {
201 err = dmu_tx_check_ioerr(zio, dn, 0, start);
202 if (err)
203 goto out;
204 }
205
206 /* last level-0 block */
207 end = (off+len-1) >> dn->dn_datablkshift;
208 if (end != start && end <= dn->dn_maxblkid &&
209 P2PHASE(off+len, dn->dn_datablksz)) {
210 err = dmu_tx_check_ioerr(zio, dn, 0, end);
211 if (err)
212 goto out;
213 }
214
215 /* level-1 blocks */
216 if (dn->dn_nlevels > 1) {
217 start >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
218 end >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
219 for (i = start+1; i < end; i++) {
220 err = dmu_tx_check_ioerr(zio, dn, 1, i);
221 if (err)
222 goto out;
223 }
224 }
225
226 err = zio_wait(zio);
227 if (err)
228 goto out;
229 }
230 }
231
232 /*
233 * If there's more than one block, the blocksize can't change,
234 * so we can make a more precise estimate. Alternatively,
235 * if the dnode's ibs is larger than max_ibs, always use that.
236 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
237 * the code will still work correctly on existing pools.
238 */
239 if (dn && (dn->dn_maxblkid != 0 || dn->dn_indblkshift > max_ibs)) {
240 min_ibs = max_ibs = dn->dn_indblkshift;
241 if (dn->dn_datablkshift != 0)
242 min_bs = max_bs = dn->dn_datablkshift;
243 }
244
245 /*
246 * 'end' is the last thing we will access, not one past.
247 * This way we won't overflow when accessing the last byte.
248 */
249 start = P2ALIGN(off, 1ULL << max_bs);
250 end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
251 txh->txh_space_towrite += end - start + 1;
252
253 start >>= min_bs;
254 end >>= min_bs;
255
256 epbs = min_ibs - SPA_BLKPTRSHIFT;
257
258 /*
259 * The object contains at most 2^(64 - min_bs) blocks,
260 * and each indirect level maps 2^epbs.
261 */
262 for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
263 start >>= epbs;
264 end >>= epbs;
265 /*
266 * If we increase the number of levels of indirection,
267 * we'll need new blkid=0 indirect blocks. If start == 0,
268 * we're already accounting for those blocks; and if end == 0,
269 * we can't increase the number of levels beyond that.
270 */
271 if (start != 0 && end != 0)
272 txh->txh_space_towrite += 1ULL << max_ibs;
273 txh->txh_space_towrite += (end - start + 1) << max_ibs;
274 }
275
276 ASSERT(txh->txh_space_towrite < 2 * DMU_MAX_ACCESS);
277
278 out:
279 if (err)
280 txh->txh_tx->tx_err = err;
281 }
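
/*
 * Worked example (illustrative only, assuming a dnode with 128K data
 * blocks (dn_datablkshift = 17), 16K indirect blocks (dn_indblkshift =
 * 14, so epbs = 14 - SPA_BLKPTRSHIFT = 7), and dn_maxblkid != 0):
 *
 * For off = 0, len = 1M: start = 0, end = 1M - 1, so 1M is charged for
 * the data blocks themselves.  After shifting by max_bs, end = 7; the
 * indirect loop then runs for bits = 47, 40, ..., 5 (seven levels) and,
 * since start == 0 throughout, charges (end - start + 1) << 14 = 16K
 * per level, for 7 * 16K = 112K of worst-case indirect overhead.  The
 * total estimate is therefore 1M + 112K, deliberately pessimistic
 * because we don't know which indirect blocks already exist.
 */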
282
283 static void
284 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
285 {
286 dnode_t *dn = txh->txh_dnode;
287 dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode;
288 uint64_t space = mdn->dn_datablksz +
289 ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);
290
291 if (dn && dn->dn_dbuf->db_blkptr &&
292 dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
293 dn->dn_dbuf->db_blkptr->blk_birth)) {
294 txh->txh_space_tooverwrite += space;
295 } else {
296 txh->txh_space_towrite += space;
297 if (dn && dn->dn_dbuf->db_blkptr)
298 txh->txh_space_tounref += space;
299 }
300 }
301
302 void
303 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
304 {
305 dmu_tx_hold_t *txh;
306
307 ASSERT(tx->tx_txg == 0);
308 ASSERT(len < DMU_MAX_ACCESS);
309 ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
310
311 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
312 object, THT_WRITE, off, len);
313 if (txh == NULL)
314 return;
315
316 dmu_tx_count_write(txh, off, len);
317 dmu_tx_count_dnode(txh);
318 }
319
320 static void
321 dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
322 {
323 uint64_t blkid, nblks, lastblk;
324 uint64_t space = 0, unref = 0, skipped = 0;
325 dnode_t *dn = txh->txh_dnode;
326 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
327 spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
328 int epbs;
329
330 if (dn->dn_nlevels == 0)
331 return;
332
333 /*
334 * The struct_rwlock protects us against dn_nlevels
335 * changing, in case (against all odds) we manage to dirty &
336 * sync out the changes after we check for being dirty.
337 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
338 */
339 rw_enter(&dn->dn_struct_rwlock, RW_READER);
340 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
341 if (dn->dn_maxblkid == 0) {
342 if (off == 0 && len >= dn->dn_datablksz) {
343 blkid = 0;
344 nblks = 1;
345 } else {
346 rw_exit(&dn->dn_struct_rwlock);
347 return;
348 }
349 } else {
350 blkid = off >> dn->dn_datablkshift;
351 nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;
352
353 if (blkid >= dn->dn_maxblkid) {
354 rw_exit(&dn->dn_struct_rwlock);
355 return;
356 }
357 if (blkid + nblks > dn->dn_maxblkid)
358 nblks = dn->dn_maxblkid - blkid;
359
360 }
361 if (dn->dn_nlevels == 1) {
362 int i;
363 for (i = 0; i < nblks; i++) {
364 blkptr_t *bp = dn->dn_phys->dn_blkptr;
365 ASSERT3U(blkid + i, <, dn->dn_nblkptr);
366 bp += blkid + i;
367 if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
368 dprintf_bp(bp, "can free old%s", "");
369 space += bp_get_dasize(spa, bp);
370 }
371 unref += BP_GET_ASIZE(bp);
372 }
373 nblks = 0;
374 }
375
376 /*
377 * Add in memory requirements of higher-level indirects.
378 * This assumes a worst-possible scenario for dn_nlevels.
379 */
380 {
381 uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
382 int level = (dn->dn_nlevels > 1) ? 2 : 1;
383
384 while (level++ < DN_MAX_LEVELS) {
385 txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
386 blkcnt = 1 + (blkcnt >> epbs);
387 }
388 ASSERT(blkcnt <= dn->dn_nblkptr);
389 }
390
391 lastblk = blkid + nblks - 1;
392 while (nblks) {
393 dmu_buf_impl_t *dbuf;
394 uint64_t ibyte, new_blkid;
395 int epb = 1 << epbs;
396 int err, i, blkoff, tochk;
397 blkptr_t *bp;
398
399 ibyte = blkid << dn->dn_datablkshift;
400 err = dnode_next_offset(dn,
401 DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
402 new_blkid = ibyte >> dn->dn_datablkshift;
403 if (err == ESRCH) {
404 skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
405 break;
406 }
407 if (err) {
408 txh->txh_tx->tx_err = err;
409 break;
410 }
411 if (new_blkid > lastblk) {
412 skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
413 break;
414 }
415
416 if (new_blkid > blkid) {
417 ASSERT((new_blkid >> epbs) > (blkid >> epbs));
418 skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
419 nblks -= new_blkid - blkid;
420 blkid = new_blkid;
421 }
422 blkoff = P2PHASE(blkid, epb);
423 tochk = MIN(epb - blkoff, nblks);
424
425 dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);
426
427 txh->txh_memory_tohold += dbuf->db.db_size;
428 if (txh->txh_memory_tohold > DMU_MAX_ACCESS) {
429 txh->txh_tx->tx_err = E2BIG;
430 dbuf_rele(dbuf, FTAG);
431 break;
432 }
433 err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
434 if (err != 0) {
435 txh->txh_tx->tx_err = err;
436 dbuf_rele(dbuf, FTAG);
437 break;
438 }
439
440 bp = dbuf->db.db_data;
441 bp += blkoff;
442
443 for (i = 0; i < tochk; i++) {
444 if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
445 dprintf_bp(&bp[i], "can free old%s", "");
446 space += bp_get_dasize(spa, &bp[i]);
447 }
448 unref += BP_GET_ASIZE(&bp[i]);
449 }
450 dbuf_rele(dbuf, FTAG);
451
452 blkid += tochk;
453 nblks -= tochk;
454 }
455 rw_exit(&dn->dn_struct_rwlock);
456
457 /* account for new level 1 indirect blocks that might show up */
458 if (skipped > 0) {
459 txh->txh_fudge += skipped << dn->dn_indblkshift;
460 skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
461 txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
462 }
463 txh->txh_space_tofree += space;
464 txh->txh_space_tounref += unref;
465 }
466
467 void
468 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
469 {
470 dmu_tx_hold_t *txh;
471 dnode_t *dn;
472 uint64_t start, end, i;
473 int err, shift;
474 zio_t *zio;
475
476 ASSERT(tx->tx_txg == 0);
477
478 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
479 object, THT_FREE, off, len);
480 if (txh == NULL)
481 return;
482 dn = txh->txh_dnode;
483
484 /* first block */
485 if (off != 0)
486 dmu_tx_count_write(txh, off, 1);
487 /* last block */
488 if (len != DMU_OBJECT_END)
489 dmu_tx_count_write(txh, off+len, 1);
490
491 if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
492 return;
493 if (len == DMU_OBJECT_END)
494 len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;
495
496 /*
497 * For i/o error checking, read the first and last level-0
498 * blocks, and all the level-1 blocks. The above count_write
499 * calls have already taken care of the level-0 blocks.
500 */
501 if (dn->dn_nlevels > 1) {
502 shift = dn->dn_datablkshift + dn->dn_indblkshift -
503 SPA_BLKPTRSHIFT;
504 start = off >> shift;
505 end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;
506
507 zio = zio_root(tx->tx_pool->dp_spa,
508 NULL, NULL, ZIO_FLAG_CANFAIL);
509 for (i = start; i <= end; i++) {
510 uint64_t ibyte = i << shift;
511 err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
512 i = ibyte >> shift;
513 if (err == ESRCH)
514 break;
515 if (err) {
516 tx->tx_err = err;
517 return;
518 }
519
520 err = dmu_tx_check_ioerr(zio, dn, 1, i);
521 if (err) {
522 tx->tx_err = err;
523 return;
524 }
525 }
526 err = zio_wait(zio);
527 if (err) {
528 tx->tx_err = err;
529 return;
530 }
531 }
532
533 dmu_tx_count_dnode(txh);
534 dmu_tx_count_free(txh, off, len);
535 }
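
/*
 * Example (illustrative sketch; "object" is a placeholder): a caller
 * truncating or destroying an object holds the range being freed
 * before assigning the tx:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	if ((err = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	err = dmu_free_range(os, object, 0, DMU_OBJECT_END, tx);
 *	dmu_tx_commit(tx);
 */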
536
537 void
538 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, char *name)
539 {
540 dmu_tx_hold_t *txh;
541 dnode_t *dn;
542 uint64_t nblocks;
543 int epbs, err;
544
545 ASSERT(tx->tx_txg == 0);
546
547 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
548 object, THT_ZAP, add, (uintptr_t)name);
549 if (txh == NULL)
550 return;
551 dn = txh->txh_dnode;
552
553 dmu_tx_count_dnode(txh);
554
555 if (dn == NULL) {
556 /*
557 * We will be able to fit a new object's entries into one leaf
558 * block. So there will be at most 2 blocks total,
559 * including the header block.
560 */
561 dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
562 return;
563 }
564
565 ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);
566
567 if (dn->dn_maxblkid == 0 && !add) {
568 /*
569 * If there is only one block (i.e. this is a micro-zap)
570 * and we are not adding anything, the accounting is simple.
571 */
572 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
573 if (err) {
574 tx->tx_err = err;
575 return;
576 }
577
578 /*
579 * Use max block size here, since we don't know how much
580 * the size will change between now and the dbuf dirty call.
581 */
582 if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
583 dn->dn_phys->dn_blkptr[0].blk_birth)) {
584 txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
585 } else {
586 txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
587 txh->txh_space_tounref +=
588 BP_GET_ASIZE(dn->dn_phys->dn_blkptr);
589 }
590 return;
591 }
592
593 if (dn->dn_maxblkid > 0 && name) {
594 /*
595 * access the name in this fat-zap so that we'll check
596 * for i/o errors to the leaf blocks, etc.
597 */
598 err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name,
599 8, 0, NULL);
600 if (err == EIO) {
601 tx->tx_err = err;
602 return;
603 }
604 }
605
606 /*
607 * 3 blocks overwritten: target leaf, ptrtbl block, header block
608 * 3 new blocks written if adding: new split leaf, 2 grown ptrtbl blocks
609 */
610 dmu_tx_count_write(txh, dn->dn_maxblkid * dn->dn_datablksz,
611 (3 + (add ? 3 : 0)) << dn->dn_datablkshift);
612
613 /*
614 * If the modified blocks are scattered to the four winds,
615 * we'll have to modify an indirect twig for each.
616 */
617 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
618 for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
619 txh->txh_space_towrite += 3 << dn->dn_indblkshift;
620 }
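
/*
 * Example (illustrative sketch; dir_object and name are placeholders):
 * directory-style callers hold the ZAP object for the entry they are
 * about to manipulate, so the estimates above cover both the micro-zap
 * and fat-zap cases:
 *
 *	dmu_tx_hold_zap(tx, dir_object, TRUE, name);	(adding "name")
 *	dmu_tx_hold_zap(tx, dir_object, FALSE, name);	(removing "name")
 *	dmu_tx_hold_zap(tx, dir_object, TRUE, NULL);	(name not yet known)
 */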
621
622 void
623 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
624 {
625 dmu_tx_hold_t *txh;
626
627 ASSERT(tx->tx_txg == 0);
628
629 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
630 object, THT_BONUS, 0, 0);
631 if (txh)
632 dmu_tx_count_dnode(txh);
633 }
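
/*
 * Example (illustrative sketch; "object" is a placeholder): callers
 * that only update an object's bonus buffer hold just the bonus before
 * dirtying it:
 *
 *	dmu_tx_hold_bonus(tx, object);
 *	...
 *	VERIFY(0 == dmu_bonus_hold(os, object, FTAG, &db));
 *	dmu_buf_will_dirty(db, tx);
 */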
634
635 void
636 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
637 {
638 dmu_tx_hold_t *txh;
639 ASSERT(tx->tx_txg == 0);
640
641 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
642 DMU_NEW_OBJECT, THT_SPACE, space, 0);
643
644 txh->txh_space_towrite += space;
645 }
646
647 int
648 dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
649 {
650 dmu_tx_hold_t *txh;
651 int holds = 0;
652
653 /*
654 * By asserting that the tx is assigned, we're counting the
655 * number of dn_tx_holds, which is the same as the number of
656 * dn_holds. Otherwise, we'd be counting dn_holds, but
657 * dn_tx_holds could be 0.
658 */
659 ASSERT(tx->tx_txg != 0);
660
661 /* if (tx->tx_anyobj == TRUE) */
662 /* return (0); */
663
664 for (txh = list_head(&tx->tx_holds); txh;
665 txh = list_next(&tx->tx_holds, txh)) {
666 if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
667 holds++;
668 }
669
670 return (holds);
671 }
672
673 #ifdef ZFS_DEBUG
674 void
675 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
676 {
677 dmu_tx_hold_t *txh;
678 int match_object = FALSE, match_offset = FALSE;
679 dnode_t *dn = db->db_dnode;
680
681 ASSERT(tx->tx_txg != 0);
682 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os);
683 ASSERT3U(dn->dn_object, ==, db->db.db_object);
684
685 if (tx->tx_anyobj)
686 return;
687
688 /* XXX No checking on the meta dnode for now */
689 if (db->db.db_object == DMU_META_DNODE_OBJECT)
690 return;
691
692 for (txh = list_head(&tx->tx_holds); txh;
693 txh = list_next(&tx->tx_holds, txh)) {
694 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
695 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
696 match_object = TRUE;
697 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
698 int datablkshift = dn->dn_datablkshift ?
699 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
700 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
701 int shift = datablkshift + epbs * db->db_level;
702 uint64_t beginblk = shift >= 64 ? 0 :
703 (txh->txh_arg1 >> shift);
704 uint64_t endblk = shift >= 64 ? 0 :
705 ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
706 uint64_t blkid = db->db_blkid;
707
708 /* XXX txh_arg2 better not be zero... */
709
710 dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
711 txh->txh_type, beginblk, endblk);
712
713 switch (txh->txh_type) {
714 case THT_WRITE:
715 if (blkid >= beginblk && blkid <= endblk)
716 match_offset = TRUE;
717 /*
718 * We will let this hold work for the bonus
719 * buffer so that we don't need to hold it
720 * when creating a new object.
721 */
722 if (blkid == DB_BONUS_BLKID)
723 match_offset = TRUE;
724 /*
725 * They might have to increase nlevels,
726 * thus dirtying the new TLIBs. Or they
727 * might have to change the block size,
728 * thus dirtying the new lvl=0 blk=0.
729 */
730 if (blkid == 0)
731 match_offset = TRUE;
732 break;
733 case THT_FREE:
734 /*
735 * We will dirty all the level 1 blocks in
736 * the free range and perhaps the first and
737 * last level 0 block.
738 */
739 if (blkid >= beginblk && (blkid <= endblk ||
740 txh->txh_arg2 == DMU_OBJECT_END))
741 match_offset = TRUE;
742 break;
743 case THT_BONUS:
744 if (blkid == DB_BONUS_BLKID)
745 match_offset = TRUE;
746 break;
747 case THT_ZAP:
748 match_offset = TRUE;
749 break;
750 case THT_NEWOBJECT:
751 match_object = TRUE;
752 break;
753 default:
754 ASSERT(!"bad txh_type");
755 }
756 }
757 if (match_object && match_offset)
758 return;
759 }
760 panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
761 (u_longlong_t)db->db.db_object, db->db_level,
762 (u_longlong_t)db->db_blkid);
763 }
764 #endif
765
766 static int
767 dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
768 {
769 dmu_tx_hold_t *txh;
770 spa_t *spa = tx->tx_pool->dp_spa;
771 uint64_t memory, asize, fsize, usize;
772 uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;
773
774 ASSERT3U(tx->tx_txg, ==, 0);
775
776 if (tx->tx_err)
777 return (tx->tx_err);
778
779 if (spa_suspended(spa)) {
780 /*
781 * If the user has indicated a blocking failure mode
782 * then return ERESTART which will block in dmu_tx_wait().
783 * Otherwise, return EIO so that an error can get
784 * propagated back to the VOP calls.
785 *
786 * Note that we always honor the txg_how flag regardless
787 * of the failuremode setting.
788 */
789 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
790 txg_how != TXG_WAIT)
791 return (EIO);
792
793 return (ERESTART);
794 }
795
796 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
797 tx->tx_needassign_txh = NULL;
798
799 /*
800 * NB: No error returns are allowed after txg_hold_open, but
801 * before processing the dnode holds, due to the
802 * dmu_tx_unassign() logic.
803 */
804
805 towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
806 for (txh = list_head(&tx->tx_holds); txh;
807 txh = list_next(&tx->tx_holds, txh)) {
808 dnode_t *dn = txh->txh_dnode;
809 if (dn != NULL) {
810 mutex_enter(&dn->dn_mtx);
811 if (dn->dn_assigned_txg == tx->tx_txg - 1) {
812 mutex_exit(&dn->dn_mtx);
813 tx->tx_needassign_txh = txh;
814 return (ERESTART);
815 }
816 if (dn->dn_assigned_txg == 0)
817 dn->dn_assigned_txg = tx->tx_txg;
818 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
819 (void) refcount_add(&dn->dn_tx_holds, tx);
820 mutex_exit(&dn->dn_mtx);
821 }
822 towrite += txh->txh_space_towrite;
823 tofree += txh->txh_space_tofree;
824 tooverwrite += txh->txh_space_tooverwrite;
825 tounref += txh->txh_space_tounref;
826 tohold += txh->txh_memory_tohold;
827 fudge += txh->txh_fudge;
828 }
829
830 /*
831 * NB: This check must be after we've held the dnodes, so that
832 * the dmu_tx_unassign() logic will work properly
833 */
834 if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
835 return (ERESTART);
836
837 /*
838 * If a snapshot has been taken since we made our estimates,
839 * assume that we won't be able to free or overwrite anything.
840 */
841 if (tx->tx_objset &&
842 dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) >
843 tx->tx_lastsnap_txg) {
844 towrite += tooverwrite;
845 tooverwrite = tofree = 0;
846 }
847
848 /* needed allocation: worst-case estimate of write space */
849 asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
850 /* freed space estimate: worst-case overwrite + free estimate */
851 fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
852 /* convert unrefd space to worst-case estimate */
853 usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
854 /* calculate memory footprint estimate */
855 memory = towrite + tooverwrite + tohold;
856
857 #ifdef ZFS_DEBUG
858 /*
859 * Add in 'tohold' to account for our dirty holds on this memory
860 * XXX - the "fudge" factor is to account for skipped blocks that
861 * we missed because dnode_next_offset() misses in-core-only blocks.
862 */
863 tx->tx_space_towrite = asize +
864 spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
865 tx->tx_space_tofree = tofree;
866 tx->tx_space_tooverwrite = tooverwrite;
867 tx->tx_space_tounref = tounref;
868 #endif
869
870 if (tx->tx_dir && asize != 0) {
871 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
872 asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
873 if (err)
874 return (err);
875 }
876
877 return (0);
878 }
879
880 static void
881 dmu_tx_unassign(dmu_tx_t *tx)
882 {
883 dmu_tx_hold_t *txh;
884
885 if (tx->tx_txg == 0)
886 return;
887
888 txg_rele_to_quiesce(&tx->tx_txgh);
889
890 for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
891 txh = list_next(&tx->tx_holds, txh)) {
892 dnode_t *dn = txh->txh_dnode;
893
894 if (dn == NULL)
895 continue;
896 mutex_enter(&dn->dn_mtx);
897 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
898
899 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
900 dn->dn_assigned_txg = 0;
901 cv_broadcast(&dn->dn_notxholds);
902 }
903 mutex_exit(&dn->dn_mtx);
904 }
905
906 txg_rele_to_sync(&tx->tx_txgh);
907
908 tx->tx_lasttried_txg = tx->tx_txg;
909 tx->tx_txg = 0;
910 }
911
912 /*
913 * Assign tx to a transaction group. txg_how can be one of:
914 *
915 * (1) TXG_WAIT. If the current open txg is full, waits until there's
916 * a new one. This should be used when you're not holding locks.
917 * It will only fail if we're truly out of space (or over quota).
918 *
919 * (2) TXG_NOWAIT. If we can't assign into the current open txg without
920 * blocking, returns immediately with ERESTART. This should be used
921 * whenever you're holding locks. On an ERESTART error, the caller
922 * should drop locks, do a dmu_tx_wait(tx), and try again.
923 *
924 * (3) A specific txg. Use this if you need to ensure that multiple
925 * transactions all sync in the same txg. Like TXG_NOWAIT, it
926 * returns ERESTART if it can't assign you into the requested txg.
927 */
928 int
929 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
930 {
931 int err;
932
933 ASSERT(tx->tx_txg == 0);
934 ASSERT(txg_how != 0);
935 ASSERT(!dsl_pool_sync_context(tx->tx_pool));
936
937 while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
938 dmu_tx_unassign(tx);
939
940 if (err != ERESTART || txg_how != TXG_WAIT)
941 return (err);
942
943 dmu_tx_wait(tx);
944 }
945
946 txg_rele_to_quiesce(&tx->tx_txgh);
947
948 return (0);
949 }
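
/*
 * Example (illustrative sketch of the typical consumer pattern; object,
 * off, len, and buf are placeholders):
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, object);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err) {
 *		if (err == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */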
950
951 void
952 dmu_tx_wait(dmu_tx_t *tx)
953 {
954 spa_t *spa = tx->tx_pool->dp_spa;
955
956 ASSERT(tx->tx_txg == 0);
957
958 /*
959 * It's possible that the pool has become active after this thread
960 * has tried to obtain a tx. If that's the case then its
961 * tx_lasttried_txg would not have been assigned.
962 */
963 if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
964 txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
965 } else if (tx->tx_needassign_txh) {
966 dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
967
968 mutex_enter(&dn->dn_mtx);
969 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
970 cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
971 mutex_exit(&dn->dn_mtx);
972 tx->tx_needassign_txh = NULL;
973 } else {
974 txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
975 }
976 }
977
978 void
979 dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
980 {
981 #ifdef ZFS_DEBUG
982 if (tx->tx_dir == NULL || delta == 0)
983 return;
984
985 if (delta > 0) {
986 ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
987 tx->tx_space_towrite);
988 (void) refcount_add_many(&tx->tx_space_written, delta, NULL);
989 } else {
990 (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
991 }
992 #endif
993 }
994
995 void
996 dmu_tx_commit(dmu_tx_t *tx)
997 {
998 dmu_tx_hold_t *txh;
999
1000 ASSERT(tx->tx_txg != 0);
1001
1002 while (txh = list_head(&tx->tx_holds)) {
1003 dnode_t *dn = txh->txh_dnode;
1004
1005 list_remove(&tx->tx_holds, txh);
1006 kmem_free(txh, sizeof (dmu_tx_hold_t));
1007 if (dn == NULL)
1008 continue;
1009 mutex_enter(&dn->dn_mtx);
1010 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1011
1012 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1013 dn->dn_assigned_txg = 0;
1014 cv_broadcast(&dn->dn_notxholds);
1015 }
1016 mutex_exit(&dn->dn_mtx);
1017 dnode_rele(dn, tx);
1018 }
1019
1020 if (tx->tx_tempreserve_cookie)
1021 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1022
1023 if (tx->tx_anyobj == FALSE)
1024 txg_rele_to_sync(&tx->tx_txgh);
1025 list_destroy(&tx->tx_holds);
1026 #ifdef ZFS_DEBUG
1027 dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
1028 tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
1029 tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
1030 refcount_destroy_many(&tx->tx_space_written,
1031 refcount_count(&tx->tx_space_written));
1032 refcount_destroy_many(&tx->tx_space_freed,
1033 refcount_count(&tx->tx_space_freed));
1034 #endif
1035 kmem_free(tx, sizeof (dmu_tx_t));
1036 }
1037
1038 void
1039 dmu_tx_abort(dmu_tx_t *tx)
1040 {
1041 dmu_tx_hold_t *txh;
1042
1043 ASSERT(tx->tx_txg == 0);
1044
1045 while (txh = list_head(&tx->tx_holds)) {
1046 dnode_t *dn = txh->txh_dnode;
1047
1048 list_remove(&tx->tx_holds, txh);
1049 kmem_free(txh, sizeof (dmu_tx_hold_t));
1050 if (dn != NULL)
1051 dnode_rele(dn, tx);
1052 }
1053 list_destroy(&tx->tx_holds);
1054 #ifdef ZFS_DEBUG
1055 refcount_destroy_many(&tx->tx_space_written,
1056 refcount_count(&tx->tx_space_written));
1057 refcount_destroy_many(&tx->tx_space_freed,
1058 refcount_count(&tx->tx_space_freed));
1059 #endif
1060 kmem_free(tx, sizeof (dmu_tx_t));
1061 }
1062
1063 uint64_t
1064 dmu_tx_get_txg(dmu_tx_t *tx)
1065 {
1066 ASSERT(tx->tx_txg != 0);
1067 return (tx->tx_txg);
1068 }