/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
        { "dmu_tx_assigned", KSTAT_DATA_UINT64 },
        { "dmu_tx_delay", KSTAT_DATA_UINT64 },
        { "dmu_tx_error", KSTAT_DATA_UINT64 },
        { "dmu_tx_suspended", KSTAT_DATA_UINT64 },
        { "dmu_tx_group", KSTAT_DATA_UINT64 },
        { "dmu_tx_memory_reserve", KSTAT_DATA_UINT64 },
        { "dmu_tx_memory_reclaim", KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_throttle", KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_delay", KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_over_max", KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_frees_delay", KSTAT_DATA_UINT64 },
        { "dmu_tx_wrlog_delay", KSTAT_DATA_UINT64 },
        { "dmu_tx_quota", KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
        dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
        tx->tx_dir = dd;
        if (dd != NULL)
                tx->tx_pool = dd->dd_pool;
        list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
            offsetof(dmu_tx_hold_t, txh_node));
        list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
            offsetof(dmu_tx_callback_t, dcb_node));
        tx->tx_start = gethrtime();
        return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
        dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
        tx->tx_objset = os;
        return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
        dmu_tx_t *tx = dmu_tx_create_dd(NULL);

        TXG_VERIFY(dp->dp_spa, txg);
        tx->tx_pool = dp;
        tx->tx_txg = txg;
        tx->tx_anyobj = TRUE;

        return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
        dmu_tx_hold_t *txh;

        if (dn != NULL) {
                (void) zfs_refcount_add(&dn->dn_holds, tx);
                if (tx->tx_txg != 0) {
                        mutex_enter(&dn->dn_mtx);
                        /*
                         * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
                         * problem, but there's no way for it to happen (for
                         * now, at least).
                         */
                        ASSERT(dn->dn_assigned_txg == 0);
                        dn->dn_assigned_txg = tx->tx_txg;
                        (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
        }

        txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
        zfs_refcount_create(&txh->txh_space_towrite);
        zfs_refcount_create(&txh->txh_memory_tohold);
        txh->txh_type = type;
        txh->txh_arg1 = arg1;
        txh->txh_arg2 = arg2;
        list_insert_tail(&tx->tx_holds, txh);

        return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
        dnode_t *dn = NULL;
        dmu_tx_hold_t *txh;
        int err;

        if (object != DMU_NEW_OBJECT) {
                err = dnode_hold(os, object, FTAG, &dn);
                if (err != 0) {
                        tx->tx_err = err;
                        return (NULL);
                }
        }
        txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
        if (dn != NULL)
                dnode_rele(dn, FTAG);
        return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
        /*
         * If we're syncing, they can manipulate any object anyhow, and
         * the hold on the dnode_t can cause problems.
         */
        if (!dmu_tx_is_syncing(tx))
                (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk. The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign(). There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance. The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned. This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now). Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer). Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read. However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
        int err;
        dmu_buf_impl_t *db;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        db = dbuf_hold_level(dn, level, blkid, FTAG);
        rw_exit(&dn->dn_struct_rwlock);
        if (db == NULL)
                return (SET_ERROR(EIO));
        /*
         * PARTIAL_FIRST allows caching for uncacheable blocks. It will
         * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
         */
        err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
            (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
        dbuf_rele(db, FTAG);
        return (err);
}
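
/*
 * Illustrative sketch (not part of the original file): the hold/assign/
 * commit cycle that the read-before-assign logic above serves. Only the
 * dmu_tx_* and dmu_write() calls are real API; the function name and
 * parameters here are hypothetical.
 */
#if 0
static int
example_overwrite(objset_t *os, uint64_t object, uint64_t off,
    uint64_t len, const void *buf)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        /* Holds may read data now, so i/o errors surface before assign. */
        dmu_tx_hold_write(tx, object, off, len);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);       /* nothing dirtied yet, safe to abort */
                return (err);
        }

        dmu_write(os, object, off, len, buf, tx);
        dmu_tx_commit(tx);
        return (0);
}
#endif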

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dnode_t *dn = txh->txh_dnode;
        int err = 0;

        if (len == 0)
                return;

        (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

        if (dn == NULL)
                return;

        /*
         * For i/o error checking, read the blocks that will be needed
         * to perform the write: the first and last level-0 blocks (if
         * they are not aligned, i.e. if they are partial-block writes),
         * and all the level-1 blocks.
         */
        if (dn->dn_maxblkid == 0) {
                if (off < dn->dn_datablksz &&
                    (off > 0 || len < dn->dn_datablksz)) {
                        err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }
        } else {
                zio_t *zio = zio_root(dn->dn_objset->os_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);

                /* first level-0 block */
                uint64_t start = off >> dn->dn_datablkshift;
                if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
                        err = dmu_tx_check_ioerr(zio, dn, 0, start);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }

                /* last level-0 block */
                uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
                if (end != start && end <= dn->dn_maxblkid &&
                    P2PHASE(off + len, dn->dn_datablksz)) {
                        err = dmu_tx_check_ioerr(zio, dn, 0, end);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }

                /* level-1 blocks */
                if (dn->dn_nlevels > 1) {
                        int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                        for (uint64_t i = (start >> shft) + 1;
                            i < end >> shft; i++) {
                                err = dmu_tx_check_ioerr(zio, dn, 1, i);
                                if (err != 0) {
                                        txh->txh_tx->tx_err = err;
                                }
                        }
                }

                err = zio_wait(zio);
                if (err != 0) {
                        txh->txh_tx->tx_err = err;
                }
        }
}

static void
dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dnode_t *dn = txh->txh_dnode;
        int err = 0;

        if (len == 0)
                return;

        (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

        if (dn == NULL)
                return;

        /*
         * For i/o error checking, read the blocks that will be needed
         * to perform the append: only the first level-0 block (and only
         * if it is not aligned, i.e. a partial-block write); no
         * additional blocks are read.
         */
        if (dn->dn_maxblkid == 0) {
                if (off < dn->dn_datablksz &&
                    (off > 0 || len < dn->dn_datablksz)) {
                        err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }
        } else {
                zio_t *zio = zio_root(dn->dn_objset->os_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);

                /* first level-0 block */
                uint64_t start = off >> dn->dn_datablkshift;
                if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
                        err = dmu_tx_check_ioerr(zio, dn, 0, start);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }

                err = zio_wait(zio);
                if (err != 0) {
                        txh->txh_tx->tx_err = err;
                }
        }
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
        (void) zfs_refcount_add_many(&txh->txh_space_towrite,
            DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_WRITE, off, len);
        if (txh != NULL) {
                dmu_tx_count_write(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
        if (txh != NULL) {
                dmu_tx_count_write(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

/*
 * Should be used when appending to an object and the exact offset is unknown.
 * The write must occur at or beyond the specified offset. Only the L0 block
 * at the provided offset will be prefetched.
 */
void
dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_APPEND, off, DMU_OBJECT_END);
        if (txh != NULL) {
                dmu_tx_count_append(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

void
dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
        if (txh != NULL) {
                dmu_tx_count_append(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}
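
/*
 * Illustrative sketch (not part of the original file): holding for an
 * append when the final offset is only known at write time, per the
 * comment above dmu_tx_hold_append(). The function name and parameters
 * are hypothetical.
 */
#if 0
static int
example_append(objset_t *os, uint64_t object, uint64_t cur_size,
    uint64_t len, const void *buf)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        /* The write will land at or beyond cur_size. */
        dmu_tx_hold_append(tx, object, cur_size, len);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }
        dmu_write(os, object, cur_size, len, buf, tx);
        dmu_tx_commit(tx);
        return (0);
}
#endif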

/*
 * This function marks the transaction as being a "net free". The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()). Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
        tx->tx_netfree = B_TRUE;
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dmu_tx_t *tx = txh->txh_tx;
        dnode_t *dn = txh->txh_dnode;
        int err;

        ASSERT(tx->tx_txg == 0);

        if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
                return;
        if (len == DMU_OBJECT_END)
                len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

        /*
         * For i/o error checking, we read the first and last level-0
         * blocks if they are not aligned, and all the level-1 blocks.
         *
         * Note: dbuf_free_range() assumes that we have not instantiated
         * any level-0 dbufs that will be completely freed. Therefore we must
         * exercise care to not read or count the first and last blocks
         * if they are blocksize-aligned.
         */
        if (dn->dn_datablkshift == 0) {
                if (off != 0 || len < dn->dn_datablksz)
                        dmu_tx_count_write(txh, 0, dn->dn_datablksz);
        } else {
                /* first block will be modified if it is not aligned */
                if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
                        dmu_tx_count_write(txh, off, 1);
                /* last block will be modified if it is not aligned */
                if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
                        dmu_tx_count_write(txh, off + len, 1);
        }

        /*
         * Check level-1 blocks.
         */
        if (dn->dn_nlevels > 1) {
                int shift = dn->dn_datablkshift + dn->dn_indblkshift -
                    SPA_BLKPTRSHIFT;
                uint64_t start = off >> shift;
                uint64_t end = (off + len) >> shift;

                ASSERT(dn->dn_indblkshift != 0);

                /*
                 * dnode_reallocate() can result in an object with indirect
                 * blocks having an odd data block size. In this case,
                 * just check the single block.
                 */
                if (dn->dn_datablkshift == 0)
                        start = end = 0;

                zio_t *zio = zio_root(tx->tx_pool->dp_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);
                for (uint64_t i = start; i <= end; i++) {
                        uint64_t ibyte = i << shift;
                        err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
                        i = ibyte >> shift;
                        if (err == ESRCH || i > end)
                                break;
                        if (err != 0) {
                                tx->tx_err = err;
                                (void) zio_wait(zio);
                                return;
                        }

                        (void) zfs_refcount_add_many(&txh->txh_memory_tohold,
                            1 << dn->dn_indblkshift, FTAG);

                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
                        if (err != 0) {
                                tx->tx_err = err;
                                (void) zio_wait(zio);
                                return;
                        }
                }
                err = zio_wait(zio);
                if (err != 0) {
                        tx->tx_err = err;
                        return;
                }
        }
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_FREE, off, len);
        if (txh != NULL) {
                dmu_tx_count_dnode(txh);
                dmu_tx_count_free(txh, off, len);
        }
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
        if (txh != NULL) {
                dmu_tx_count_dnode(txh);
                dmu_tx_count_free(txh, off, len);
        }
}
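
/*
 * Illustrative sketch (not part of the original file): freeing a range
 * and marking the tx as a net free, as described above
 * dmu_tx_mark_netfree(). The function name and parameters are
 * hypothetical.
 */
#if 0
static int
example_free_range(objset_t *os, uint64_t object, uint64_t off, uint64_t len)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_free(tx, object, off, len);
        dmu_tx_mark_netfree(tx);        /* we expect no net space increase */

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }
        err = dmu_free_range(os, object, off, len, tx);
        dmu_tx_commit(tx);
        return (err);
}
#endif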

static void
dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        /*
         * Reuse dmu_tx_count_free(), it does exactly what we need for clone.
         */
        dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
        if (txh != NULL) {
                dmu_tx_count_dnode(txh);
                dmu_tx_count_clone(txh, off, len);
        }
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
        dmu_tx_t *tx = txh->txh_tx;
        dnode_t *dn = txh->txh_dnode;
        int err;
        extern int zap_micro_max_size;

        ASSERT(tx->tx_txg == 0);

        dmu_tx_count_dnode(txh);

        /*
         * Modifying an almost-full microzap is around the worst case (128KB).
         *
         * If it is a fat zap, the worst case would be 7*16KB=112KB:
         * - 3 blocks overwritten: target leaf, ptrtbl block, header block
         * - 4 new blocks written if adding:
         *    - 2 blocks for possibly split leaves,
         *    - 2 grown ptrtbl blocks
         */
        (void) zfs_refcount_add_many(&txh->txh_space_towrite,
            zap_micro_max_size, FTAG);

        if (dn == NULL)
                return;

        ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

        if (dn->dn_maxblkid == 0 || name == NULL) {
                /*
                 * This is a microzap (only one block), or we don't know
                 * the name. Check the first block for i/o errors.
                 */
                err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                if (err != 0) {
                        tx->tx_err = err;
                }
        } else {
                /*
                 * Access the name so that we'll check for i/o errors to
                 * the leaf blocks, etc. We ignore ENOENT, as this name
                 * may not yet exist.
                 */
                err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
                if (err == EIO || err == ECKSUM || err == ENXIO) {
                        tx->tx_err = err;
                }
        }
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_ZAP, add, (uintptr_t)name);
        if (txh != NULL)
                dmu_tx_hold_zap_impl(txh, name);
}
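
/*
 * Illustrative sketch (not part of the original file): holding a ZAP for
 * a directory-entry style addition. The function name and parameters are
 * hypothetical; dmu_tx_hold_zap() and zap_add() are real API.
 */
#if 0
static int
example_zap_add(objset_t *os, uint64_t zapobj, const char *name, uint64_t val)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        /* add != 0; passing the name lets leaf blocks be checked for i/o */
        dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }
        err = zap_add(os, zapobj, name, sizeof (uint64_t), 1, &val, tx);
        dmu_tx_commit(tx);
        return (err);
}
#endif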

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT(dn != NULL);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
        if (txh != NULL)
                dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_BONUS, 0, 0);
        if (txh)
                dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
        if (txh)
                dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            DMU_NEW_OBJECT, THT_SPACE, space, 0);
        if (txh) {
                (void) zfs_refcount_add_many(
                    &txh->txh_space_towrite, space, FTAG);
        }
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
        boolean_t match_object = B_FALSE;
        boolean_t match_offset = B_FALSE;

        DB_DNODE_ENTER(db);
        dnode_t *dn = DB_DNODE(db);
        ASSERT(tx->tx_txg != 0);
        ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
        ASSERT3U(dn->dn_object, ==, db->db.db_object);

        if (tx->tx_anyobj) {
                DB_DNODE_EXIT(db);
                return;
        }

        /* XXX No checking on the meta dnode for now */
        if (db->db.db_object == DMU_META_DNODE_OBJECT) {
                DB_DNODE_EXIT(db);
                return;
        }

        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
            txh = list_next(&tx->tx_holds, txh)) {
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
                        match_object = TRUE;
                if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
                        int datablkshift = dn->dn_datablkshift ?
                            dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
                        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                        int shift = datablkshift + epbs * db->db_level;
                        uint64_t beginblk = shift >= 64 ? 0 :
                            (txh->txh_arg1 >> shift);
                        uint64_t endblk = shift >= 64 ? 0 :
                            ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
                        uint64_t blkid = db->db_blkid;

                        /* XXX txh_arg2 better not be zero... */

                        dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
                            txh->txh_type, (u_longlong_t)beginblk,
                            (u_longlong_t)endblk);

                        switch (txh->txh_type) {
                        case THT_WRITE:
                                if (blkid >= beginblk && blkid <= endblk)
                                        match_offset = TRUE;
                                /*
                                 * We will let this hold work for the bonus
                                 * or spill buffer so that we don't need to
                                 * hold it when creating a new object.
                                 */
                                if (blkid == DMU_BONUS_BLKID ||
                                    blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs. Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        case THT_APPEND:
                                if (blkid >= beginblk && (blkid <= endblk ||
                                    txh->txh_arg2 == DMU_OBJECT_END))
                                        match_offset = TRUE;

                                /*
                                 * THT_WRITE used for bonus and spill blocks.
                                 */
                                ASSERT(blkid != DMU_BONUS_BLKID &&
                                    blkid != DMU_SPILL_BLKID);

                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs. Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        case THT_FREE:
                                /*
                                 * We will dirty all the level 1 blocks in
                                 * the free range and perhaps the first and
                                 * last level 0 block.
                                 */
                                if (blkid >= beginblk && (blkid <= endblk ||
                                    txh->txh_arg2 == DMU_OBJECT_END))
                                        match_offset = TRUE;
                                break;
                        case THT_SPILL:
                                if (blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_BONUS:
                                if (blkid == DMU_BONUS_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_ZAP:
                                match_offset = TRUE;
                                break;
                        case THT_NEWOBJECT:
                                match_object = TRUE;
                                break;
                        case THT_CLONE:
                                if (blkid >= beginblk && blkid <= endblk)
                                        match_offset = TRUE;
                                break;
                        default:
                                cmn_err(CE_PANIC, "bad txh_type %d",
                                    txh->txh_type);
                        }
                }
                if (match_object && match_offset) {
                        DB_DNODE_EXIT(db);
                        return;
                }
        }
        DB_DNODE_EXIT(db);
        panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
            (u_longlong_t)db->db.db_object, db->db_level,
            (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong. Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting. This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time. This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->     *****            +
 *       |                                         ****                 |
 *       +                                      ****                    +
 * 100us +                                    **                        +
 *       +                                   *                          +
 *       |                                  *                           |
 *       +                                 *                            +
 *  10us +                                *                             +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly. The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
        dsl_pool_t *dp = tx->tx_pool;
        uint64_t delay_min_bytes, wrlog;
        hrtime_t wakeup, tx_time = 0, now;

        /* Calculate minimum transaction time for the dirty data amount. */
        delay_min_bytes =
            zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
        if (dirty > delay_min_bytes) {
                /*
                 * The caller has already waited until we are under the max.
                 * We make them pass us the amount of dirty data so we don't
                 * have to handle the case of it being >= the max, which
                 * could cause a divide-by-zero if it's == the max.
                 */
                ASSERT3U(dirty, <, zfs_dirty_data_max);

                tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
                    (zfs_dirty_data_max - dirty);
        }

        /* Calculate minimum transaction time for the TX_WRITE log size. */
        wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
        delay_min_bytes =
            zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
        if (wrlog >= zfs_wrlog_data_max) {
                tx_time = zfs_delay_max_ns;
        } else if (wrlog > delay_min_bytes) {
                tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
                    (zfs_wrlog_data_max - wrlog), tx_time);
        }

        if (tx_time == 0)
                return;

        tx_time = MIN(tx_time, zfs_delay_max_ns);
        now = gethrtime();
        if (now > tx->tx_start + tx_time)
                return;

        DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
            uint64_t, tx_time);

        mutex_enter(&dp->dp_lock);
        wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
        dp->dp_last_wakeup = wakeup;
        mutex_exit(&dp->dp_lock);

        zfs_sleep_until(wakeup);
}
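
/*
 * Worked example of the min_time formula above, using illustrative
 * tunable values (not asserting any defaults): zfs_dirty_data_max =
 * 4 GiB, zfs_delay_min_dirty_percent = 60, zfs_delay_scale = 500000 ns.
 * With dirty = 3 GiB:
 *
 *     delay_min_bytes = 4 GiB * 60 / 100 = 2.4 GiB
 *     tx_time = 500000 * (3 GiB - 2.4 GiB) / (4 GiB - 3 GiB)
 *             = 500000 * 0.6 = 300000 ns = 300 us
 *
 * i.e. at 75% dirty the transaction is stretched to roughly 300 us,
 * well below the zfs_delay_max_ns (100 ms) cap.
 */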

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC. Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 * - this transaction's holds' txh_space_towrite
 *
 * - dd_tempreserved[], which is the sum of in-flight transactions'
 *   holds' txh_space_towrite (i.e. those transactions that have called
 *   dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 * - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction. Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
        spa_t *spa = tx->tx_pool->dp_spa;

        ASSERT0(tx->tx_txg);

        if (tx->tx_err) {
                DMU_TX_STAT_BUMP(dmu_tx_error);
                return (tx->tx_err);
        }

        if (spa_suspended(spa)) {
                DMU_TX_STAT_BUMP(dmu_tx_suspended);

                /*
                 * If the user has indicated a blocking failure mode
                 * then return ERESTART which will block in dmu_tx_wait().
                 * Otherwise, return EIO so that an error can get
                 * propagated back to the VOP calls.
                 *
                 * Note that we always honor the txg_how flag regardless
                 * of the failmode setting.
                 */
                if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
                    !(txg_how & TXG_WAIT))
                        return (SET_ERROR(EIO));

                return (SET_ERROR(ERESTART));
        }

        if (!tx->tx_dirty_delayed &&
            dsl_pool_need_wrlog_delay(tx->tx_pool)) {
                tx->tx_wait_dirty = B_TRUE;
                DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
                return (SET_ERROR(ERESTART));
        }

        if (!tx->tx_dirty_delayed &&
            dsl_pool_need_dirty_delay(tx->tx_pool)) {
                tx->tx_wait_dirty = B_TRUE;
                DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
                return (SET_ERROR(ERESTART));
        }

        tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
        tx->tx_needassign_txh = NULL;

        /*
         * NB: No error returns are allowed after txg_hold_open, but
         * before processing the dnode holds, due to the
         * dmu_tx_unassign() logic.
         */

        uint64_t towrite = 0;
        uint64_t tohold = 0;
        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;
                if (dn != NULL) {
                        /*
                         * This thread can't hold the dn_struct_rwlock
                         * while assigning the tx, because this can lead to
                         * deadlock. Specifically, if this dnode is already
                         * assigned to an earlier txg, this thread may need
                         * to wait for that txg to sync (the ERESTART case
                         * below). The other thread that has assigned this
                         * dnode to an earlier txg prevents this txg from
                         * syncing until its tx can complete (calling
                         * dmu_tx_commit()), but it may need to acquire the
                         * dn_struct_rwlock to do so (e.g. via
                         * dmu_buf_hold*()).
                         *
                         * Note that this thread can't hold the lock for
                         * read either, but the rwlock doesn't record
                         * enough information to make that assertion.
                         */
                        ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

                        mutex_enter(&dn->dn_mtx);
                        if (dn->dn_assigned_txg == tx->tx_txg - 1) {
                                mutex_exit(&dn->dn_mtx);
                                tx->tx_needassign_txh = txh;
                                DMU_TX_STAT_BUMP(dmu_tx_group);
                                return (SET_ERROR(ERESTART));
                        }
                        if (dn->dn_assigned_txg == 0)
                                dn->dn_assigned_txg = tx->tx_txg;
                        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                        (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
                towrite += zfs_refcount_count(&txh->txh_space_towrite);
                tohold += zfs_refcount_count(&txh->txh_memory_tohold);
        }

        /* needed allocation: worst-case estimate of write space */
        uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
        /* calculate memory footprint estimate */
        uint64_t memory = towrite + tohold;

        if (tx->tx_dir != NULL && asize != 0) {
                int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
                    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
                if (err != 0)
                        return (err);
        }

        DMU_TX_STAT_BUMP(dmu_tx_assigned);

        return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
        if (tx->tx_txg == 0)
                return;

        txg_rele_to_quiesce(&tx->tx_txgh);

        /*
         * Walk the transaction's hold list, removing the hold on the
         * associated dnode, and notifying waiters if the refcount drops to 0.
         */
        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
            txh && txh != tx->tx_needassign_txh;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;

                if (dn == NULL)
                        continue;
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
        }

        txg_rele_to_sync(&tx->tx_txgh);

        tx->tx_lasttried_txg = tx->tx_txg;
        tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg. This should be used when no locks
 * are being held. With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART. This should be used whenever locks are being held. On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle). This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs. Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row. This guarantee holds both for subsequent calls from
 * one thread and for multiple threads. For example, it is impossible to
 * observe the following sequence of events:
 *
 *          Thread 1                            Thread 2
 *
 *     dmu_tx_assign(T1, ...)
 *     1 <- dmu_tx_get_txg(T1)
 *                                       dmu_tx_assign(T2, ...)
 *                                       2 <- dmu_tx_get_txg(T2)
 *     dmu_tx_assign(T3, ...)
 *     1 <- dmu_tx_get_txg(T3)
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
        int err;

        ASSERT(tx->tx_txg == 0);
        ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
        ASSERT(!dsl_pool_sync_context(tx->tx_pool));

        /* If we might wait, we must not hold the config lock. */
        IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

        if ((txg_how & TXG_NOTHROTTLE))
                tx->tx_dirty_delayed = B_TRUE;

        while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
                dmu_tx_unassign(tx);

                if (err != ERESTART || !(txg_how & TXG_WAIT))
                        return (err);

                dmu_tx_wait(tx);
        }

        txg_rele_to_quiesce(&tx->tx_txgh);

        return (0);
}
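
/*
 * Illustrative sketch (not part of the original file): the lock-holding
 * retry pattern described above, where TXG_WAIT cannot be used. The
 * function name, lock, and parameters are hypothetical.
 */
#if 0
static int
example_locked_update(objset_t *os, uint64_t object, kmutex_t *lock)
{
        dmu_tx_t *tx;
        int err;

top:
        mutex_enter(lock);
        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, object);
        err = dmu_tx_assign(tx, TXG_NOWAIT);    /* locks held: no TXG_WAIT */
        if (err != 0) {
                mutex_exit(lock);               /* drop locks before waiting */
                if (err == ERESTART) {
                        dmu_tx_wait(tx);
                        dmu_tx_abort(tx);
                        goto top;               /* and try again */
                }
                dmu_tx_abort(tx);
                return (err);
        }
        /* ... dirty the bonus buffer here ... */
        dmu_tx_commit(tx);
        mutex_exit(lock);
        return (0);
}
#endif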

void
dmu_tx_wait(dmu_tx_t *tx)
{
        spa_t *spa = tx->tx_pool->dp_spa;
        dsl_pool_t *dp = tx->tx_pool;
        hrtime_t before;

        ASSERT(tx->tx_txg == 0);
        ASSERT(!dsl_pool_config_held(tx->tx_pool));

        before = gethrtime();

        if (tx->tx_wait_dirty) {
                uint64_t dirty;

                /*
                 * dmu_tx_try_assign() has determined that we need to wait
                 * because we've consumed much or all of the dirty buffer
                 * space.
                 */
                mutex_enter(&dp->dp_lock);
                if (dp->dp_dirty_total >= zfs_dirty_data_max)
                        DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
                while (dp->dp_dirty_total >= zfs_dirty_data_max)
                        cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
                dirty = dp->dp_dirty_total;
                mutex_exit(&dp->dp_lock);

                dmu_tx_delay(tx, dirty);

                tx->tx_wait_dirty = B_FALSE;

                /*
                 * Note: setting tx_dirty_delayed only has effect if the
                 * caller used TXG_WAIT. Otherwise they are going to
                 * destroy this tx and try again. The common case,
                 * zfs_write(), uses TXG_WAIT.
                 */
                tx->tx_dirty_delayed = B_TRUE;
        } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
                /*
                 * If the pool is suspended we need to wait until it
                 * is resumed. Note that it's possible that the pool
                 * has become active after this thread has tried to
                 * obtain a tx. If that's the case then tx_lasttried_txg
                 * would not have been set.
                 */
                txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
        } else if (tx->tx_needassign_txh) {
                dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

                mutex_enter(&dn->dn_mtx);
                while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
                        cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
                mutex_exit(&dn->dn_mtx);
                tx->tx_needassign_txh = NULL;
        } else {
                /*
                 * If we have a lot of dirty data just wait until we sync
                 * out a TXG at which point we'll hopefully have synced
                 * a portion of the changes.
                 */
                txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
        }

        spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        while ((txh = list_head(&tx->tx_holds)) != NULL) {
                dnode_t *dn = txh->txh_dnode;

                list_remove(&tx->tx_holds, txh);
                zfs_refcount_destroy_many(&txh->txh_space_towrite,
                    zfs_refcount_count(&txh->txh_space_towrite));
                zfs_refcount_destroy_many(&txh->txh_memory_tohold,
                    zfs_refcount_count(&txh->txh_memory_tohold));
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn != NULL)
                        dnode_rele(dn, tx);
        }

        list_destroy(&tx->tx_callbacks);
        list_destroy(&tx->tx_holds);
        kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg != 0);

        /*
         * Go through the transaction's hold list and remove holds on
         * associated dnodes, notifying waiters if no holds remain.
         */
        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;

                if (dn == NULL)
                        continue;

                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
        }

        if (tx->tx_tempreserve_cookie)
                dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

        if (!list_is_empty(&tx->tx_callbacks))
                txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

        if (tx->tx_anyobj == FALSE)
                txg_rele_to_sync(&tx->tx_txgh);

        dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg == 0);

        /*
         * Call any registered callbacks with an error code.
         */
        if (!list_is_empty(&tx->tx_callbacks))
                dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

        dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg != 0);
        return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
        ASSERT(tx->tx_pool != NULL);
        return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
        dmu_tx_callback_t *dcb;

        dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

        dcb->dcb_func = func;
        dcb->dcb_data = data;

        list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
        dmu_tx_callback_t *dcb;

        while ((dcb = list_remove_tail(cb_list)) != NULL) {
                dcb->dcb_func(dcb->dcb_data, error);
                kmem_free(dcb, sizeof (dmu_tx_callback_t));
        }
}
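
/*
 * Illustrative sketch (not part of the original file): registering a
 * commit callback that fires via dmu_tx_do_callbacks() once the txg
 * syncs (error == 0) or the tx is aborted (error == ECANCELED). The
 * callback body and function names are hypothetical.
 */
#if 0
static void
example_commit_cb(void *arg, int error)
{
        kmem_free(arg, sizeof (uint64_t));      /* release per-tx state */
}

static void
example_register(dmu_tx_t *tx)
{
        uint64_t *state = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);

        dmu_tx_callback_register(tx, example_commit_cb, state);
}
#endif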

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed. If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
        if (!sa->sa_need_attr_registration)
                return;

        for (int i = 0; i != sa->sa_num_attrs; i++) {
                if (!sa->sa_attr_table[i].sa_registered) {
                        if (sa->sa_reg_attr_obj)
                                dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                        else
                                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                }
        }
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
            THT_SPILL, 0, 0);
        if (txh != NULL)
                (void) zfs_refcount_add_many(&txh->txh_space_towrite,
                    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
        sa_os_t *sa = tx->tx_objset->os_sa;

        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
        } else {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
                return;

        (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
            THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the total size of the attributes may grow
 * (e.g. a variable-sized attribute being enlarged), in which case the
 * layout ZAP and a spill block are also held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
        uint64_t object;
        sa_os_t *sa = tx->tx_objset->os_sa;

        ASSERT(hdl != NULL);

        object = sa_handle_object(hdl);

        dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
        DB_DNODE_ENTER(db);
        dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
        DB_DNODE_EXIT(db);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
            tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

        if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
                ASSERT(tx->tx_txg == 0);
                dmu_tx_hold_spill(tx, object);
        } else {
                dnode_t *dn;

                DB_DNODE_ENTER(db);
                dn = DB_DNODE(db);
                if (dn->dn_have_spill) {
                        ASSERT(tx->tx_txg == 0);
                        dmu_tx_hold_spill(tx, object);
                }
                DB_DNODE_EXIT(db);
        }
}
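
/*
 * Illustrative sketch (not part of the original file): holding for an SA
 * update on an existing handle, e.g. a fixed-size overwrite that cannot
 * grow the attribute. How the handle and attribute id are obtained is
 * outside this sketch; the function name and parameters are hypothetical.
 */
#if 0
static int
example_sa_update(objset_t *os, sa_handle_t *hdl, sa_attr_type_t attr,
    uint64_t *valp)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_sa(tx, hdl, B_FALSE);       /* may_grow not needed */

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }
        err = sa_update(hdl, attr, valp, sizeof (*valp), tx);
        dmu_tx_commit(tx);
        return (err);
}
#endif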

void
dmu_tx_init(void)
{
        dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
            KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);

        if (dmu_tx_ksp != NULL) {
                dmu_tx_ksp->ks_data = &dmu_tx_stats;
                kstat_install(dmu_tx_ksp);
        }
}

void
dmu_tx_fini(void)
{
        if (dmu_tx_ksp != NULL) {
                kstat_delete(dmu_tx_ksp);
                dmu_tx_ksp = NULL;
        }
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_append);
EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif