/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>
#include <sys/trace_dmu.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef DEBUG_DMU_TX
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef DEBUG_DMU_TX
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;
	int l;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((uint64_t)(mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len <= DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;
	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVELS, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}
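
/*
 * Illustrative caller sketch (editor's example, not from this file): a
 * deletion-heavy operation builds its holds as usual and marks the tx
 * net-free before assigning, so refquota enforcement does not block the
 * free:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 */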

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;
		uint64_t i;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2ULL << fzap_default_block_shift);
		return;
	}

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write_by_dnode(dn, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh)
		txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef DEBUG_DMU_TX
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(dn != NULL);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = 100 * MICROSEC;	/* 100 milliseconds */
int zfs_delay_resolution_ns = 100 * 1000;	/* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                            * +
 *       |                                                            * |
 *   4ms +                                                            * +
 *       |                                                            * |
 *   3ms +                                                            * +
 *       |                                                            * |
 *   2ms +                                              (midpoint)   *  +
 *       |                                                  |       **  |
 *   1ms +                                                  v    ***    +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                      *                       +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
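
/*
 * A worked example (editor's illustration, assuming the usual defaults of
 * zfs_delay_min_dirty_percent = 60 and zfs_delay_scale = 500,000ns): if
 * dirty data sits at 80% of zfs_dirty_data_max, then
 *
 *	min_time = 500000 * (80% - 60%) / (100% - 80%) = 500000ns = 500us,
 *
 * i.e. the midpoint of the curves above.  At 90% dirty the same formula
 * yields 1,500,000ns (1.5ms), and the result is always capped at
 * zfs_delay_max_ns.
 */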
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
	if (now > tx->tx_start + min_tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef DEBUG_DMU_TX
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
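
/*
 * A typical TXG_NOWAIT retry loop, sketched here for illustration only
 * (the exact shape varies by caller; see e.g. the ZPL write path):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... retry with a fresh tx, passing TXG_WAITED ...
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *	}
 */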
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef DEBUG_DMU_TX
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef DEBUG_DMU_TX
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef DEBUG_DMU_TX
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
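
/*
 * Illustrative use (editor's sketch, not from this file): a caller that
 * wants notification when its changes reach stable storage registers a
 * callback before committing.  The function receives the caller's private
 * data pointer plus 0 on success or an error (e.g. ECANCELED if the tx
 * was aborted):
 *
 *	static void my_cb(void *arg, int error) { ... }
 *	...
 *	dmu_tx_callback_register(tx, my_cb, my_arg);
 *	dmu_tx_commit(tx);
 */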
1516 | ||
1517 | /* | |
1518 | * Call all the commit callbacks on a list, with a given error code. | |
1519 | */ | |
1520 | void | |
1521 | dmu_tx_do_callbacks(list_t *cb_list, int error) | |
1522 | { | |
1523 | dmu_tx_callback_t *dcb; | |
1524 | ||
c65aa5b2 | 1525 | while ((dcb = list_head(cb_list))) { |
428870ff BB |
1526 | list_remove(cb_list, dcb); |
1527 | dcb->dcb_func(dcb->dcb_data, error); | |
1528 | kmem_free(dcb, sizeof (dmu_tx_callback_t)); | |
1529 | } | |
1530 | } | |
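
/*
 * Editor's sketch (not part of the original file): how a consumer uses
 * the commit-callback interface.  The callback and its argument are
 * hypothetical; the DMU_TX_EXAMPLE guard keeps the sketch compiled out.
 */
#ifdef DMU_TX_EXAMPLE
static void
dmu_tx_example_cb(void *arg, int error)
{
	/*
	 * error is 0 once the txg has reached stable storage, or
	 * ECANCELED if the transaction was aborted (see dmu_tx_abort()).
	 */
	kmem_free(arg, sizeof (uint64_t));
}

static void
dmu_tx_example_register(dmu_tx_t *tx)
{
	uint64_t *arg = kmem_alloc(sizeof (uint64_t), KM_SLEEP);

	/* Must be registered before dmu_tx_commit() is called. */
	dmu_tx_callback_register(tx, dmu_tx_example_cb, arg);
}
#endif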

/*
 * Interface used when creating a new file to hold the full set of
 * attributes that will be added during object creation.  attrsize is
 * the total size of all of those attributes.
 *
 * For updating or adding a single attribute, dmu_tx_hold_sa() should
 * be used instead.
 */

/*
 * Hold the attribute-registration ZAP so that new attribute names can
 * be registered.  Needing this should be a very rare case; when it does
 * happen, it is only on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh == NULL)
		return;

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* No spill blkptr yet: charge a full block to towrite. */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = DN_SPILL_BLKPTR(dn->dn_phys);
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}
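
/*
 * Editor's note: the estimates above deliberately charge a full
 * SPA_OLD_MAXBLOCKSIZE for the spill block rather than its eventual
 * size.  Over-estimating a hold only inflates a temporary reservation,
 * whereas under-estimating could let a transaction dirty more data
 * than the open txg has room for.
 */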

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the update may increase the size of the
 * attribute; the hold then also covers a possible layout change and
 * the spill block the attribute may overflow into.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
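
/*
 * Editor's sketch (not part of the original file): a typical SA update
 * under a transaction.  The handle comes from the caller and the
 * attribute is elided; may_grow is B_FALSE here on the assumption that
 * the update cannot enlarge the attribute.  Compiled out by default.
 */
#ifdef DMU_TX_EXAMPLE
static int
dmu_tx_example_sa_update(objset_t *os, sa_handle_t *hdl)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* Take the SA hold before assigning the tx to a txg. */
	dmu_tx_hold_sa(tx, hdl, B_FALSE);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* ... sa_update(hdl, <attr>, &val, sizeof (val), tx) ... */

	dmu_tx_commit(tx);
	return (0);
}
#endif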

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}
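
/*
 * Editor's note: on Linux builds the SPL typically exposes this kstat
 * to userland as /proc/spl/kstat/zfs/dmu_tx, which makes the
 * assignment, delay, and throttle counters above easy to watch on a
 * live system.
 */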

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif