/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zil.h>
#include <sys/abd.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */
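
/*
 * As an informal sketch of the layout described above (for
 * illustration only; the authoritative structure definitions live in
 * zil.h and zil_impl.h):
 *
 *   zil_header_t              ZIL block                ZIL block
 *   +-----------+     +---------------------+    +---------------------+
 *   |  zh_log  -|---->| lr_t | lr_t | ...   |    | lr_t | lr_t | ...   |
 *   +-----------+     | zil_chain_t:        |    | zil_chain_t:        |
 *                     |   zc_next_blk      -|--->|   zc_next_blk --->  |
 *                     +---------------------+    +---------------------+
 *
 * Replay simply starts at zh_log and follows each block's embedded
 * zc_next_blk pointer until the chain's checksum/sequence validation
 * fails, which marks the end of the log.
 */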

/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
int zfs_commit_timeout_pct = 5;

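/*
 * A rough worked example (assumed here for illustration; see
 * zil_commit_waiter() for the authoritative logic): with the default
 * value of 5, a thread waiting on an open-but-not-full lwb waits on the
 * order of 5% of the last observed lwb write latency
 * (zl_last_lwb_latency, maintained in zil_lwb_flush_vdevs_done()
 * below) before forcing the block out, giving other itxs a chance to
 * batch into the same block.
 */
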
/*
 * See zil.h for more information about these fields.
 */
zil_stats_t zil_stats = {
	{ "zil_commit_count",			KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_count",			KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes",	KSTAT_DATA_UINT64 },
};

static kstat_t *zil_ksp;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
int zfs_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
 */
unsigned long zil_slog_bulk = 768 * 1024;

static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
	sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

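/*
 * A note on LWB_EMPTY() above: it is true exactly when no log records
 * have been committed to the lwb yet, in either block format. For
 * ZILOG2 blocks, lwb_sz is the full block size and lwb_nused starts at
 * sizeof (zil_chain_t); for older ZILOG blocks, lwb_sz excludes the
 * trailing zil_chain_t and lwb_nused starts at 0 (see zil_alloc_lwb()
 * below). In both cases the remaining space equals
 * BP_GET_LSIZE() - sizeof (zil_chain_t) only while the lwb is empty.
 */
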
static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = AVL_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (likely(cmp))
		return (cmp);

	return (AVL_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

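/*
 * The zl_bp_tree managed by these functions serves as a "seen it" set
 * for zil_parse()'s claim/free callbacks: zil_bp_tree_add() (below)
 * returns EEXIST for a DVA that has already been visited, so each log
 * block and each dmu_sync()'d data block is claimed or freed at most
 * once per pass over the chain.
 */
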
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, void *dst, char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    &abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	/*
	 * If we are not using the resulting data, we are just checking that
	 * it hasn't been corrupted so we don't need to waste CPU time
	 * decompressing and decrypting it.
	 */
	if (wbuf == NULL)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
    boolean_t decrypt)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	bzero(&next_blk, sizeof (blkptr_t));

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end = NULL;

		if (blk_seq > claim_blk_seq)
			break;

		error = parse_blk_func(zilog, &blk, arg, txg);
		if (error != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
		    lrbuf, &end);
		if (error != 0)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;

			error = parse_lr_func(zilog, lr, arg, txg);
			if (error != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) ||
	    (decrypt && error == EIO));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);

	return (error);
}

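/*
 * For reference, both passes over the on-disk log in this file go
 * through zil_parse(): zil_claim() (below) walks the chain with the
 * (zil_claim_log_block, zil_claim_log_record) callback pair at pool
 * import, and zil_destroy_sync() walks it with (zil_free_log_block,
 * zil_free_log_record) to release the chain's blocks.
 */
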
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg) {
		error = zil_read_log_data(zilog, lr, NULL);
		if (error != 0)
			return (error);
	}

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp))
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	return (AVL_CMP(v1, v2));
}

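/*
 * A note on the two block formats handled below: with
 * ZIO_CHECKSUM_ZILOG2 ("Slim ZIL") the zil_chain_t sits at the start of
 * the block, so lwb_nused begins at sizeof (zil_chain_t) and lwb_sz is
 * the whole block; with the older ZIO_CHECKSUM_ZILOG format the
 * zil_chain_t is a trailer at the end of the block, so lwb_sz is
 * reduced by sizeof (zil_chain_t) instead. The same distinction
 * appears in zil_read_log_block() and zil_lwb_write_issue().
 */
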
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
    boolean_t fastwrite)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_fastwrite = fastwrite;
	lwb->lwb_slog = slog;
	lwb->lwb_state = LWB_STATE_CLOSED;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_tx = NULL;
	lwb->lwb_issued_timestamp = 0;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));

	return (lwb);
}

static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
	ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
	    lwb->lwb_state == LWB_STATE_DONE);

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
 */
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
boolean_t
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t fastwrite = FALSE;
	boolean_t slog = FALSE;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 * - there isn't one already
	 * - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
		    ZIL_MIN_BLKSZ, &slog);
		fastwrite = TRUE;

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			if (lwb->lwb_fastwrite)
				metaslab_fastwrite_unmark(zilog->zl_spa,
				    &lwb->lwb_blk);

			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}

int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it cannot have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}

		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		if (os->os_encrypted)
			os->os_next_write_raw = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, B_FALSE, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg, B_FALSE);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, B_FALSE, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	/*
	 * Check the first block and determine if it's on a log device
	 * which may have been removed or faulted prior to loading this
	 * pool. If so, there's no point in checking the rest of the log
	 * as its content should have already been synced to the pool.
	 */
	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa),
	    B_FALSE);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	/*
	 * The lwb_waiters field of the lwb is protected by the zilog's
	 * zl_lock, thus it must be held when calling this function.
	 */
	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));

	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3P(lwb, !=, NULL);
	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
	    lwb->lwb_state == LWB_STATE_ISSUED);

	list_insert_tail(&lwb->lwb_waiters, zcw);
	zcw->zcw_lwb = lwb;
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
 */
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	list_insert_tail(nolwb, zcw);
	mutex_exit(&zcw->zcw_lock);
}

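/*
 * Taken together, the three helpers above cover the possible fates of a
 * commit waiter: it is either linked onto an lwb and later signalled in
 * zil_lwb_flush_vdevs_done() when that lwb is stable on disk, parked on
 * the "nolwb" list when ZIL block allocation fails, or skipped outright
 * (e.g. when its itx was already made durable by spa_sync()).
 */
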
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	mutex_enter(&lwb->lwb_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&lwb->lwb_vdev_lock);
}

void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}

/*
 * This function is called after all VDEVs associated with a given lwb
 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
 * as the lwb write completes, if "zfs_nocacheflush" is set.
 *
 * The intention is for this function to be called as soon as the
 * contents of an lwb are considered "stable" on disk, and will survive
 * any sudden loss of power. At this point, any threads waiting for the
 * lwb to reach this state are signalled, and the "waiter" structures
 * are marked "done".
 */
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;
	zil_commit_waiter_t *zcw;
	itx_t *itx;

	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);

	mutex_enter(&zilog->zl_lock);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing the
	 * txg. If we have had an allocation failure and the txg is
	 * waiting to sync then we want zil_sync() to remove the lwb so
	 * that it's not picked up as the next new one in
	 * zil_process_commit_list(). zil_sync() will only remove the
	 * lwb if lwb_buf is null.
	 */
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;

	ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
	zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;

	lwb->lwb_root_zio = NULL;
	lwb->lwb_state = LWB_STATE_DONE;

	if (zilog->zl_last_lwb_opened == lwb) {
		/*
		 * Remember the highest committed log sequence number
		 * for ztest. We only update this value when all the log
		 * writes succeeded, because ztest wants to ASSERT that
		 * it got the whole log chain.
		 */
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
	}

	while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
		list_remove(&lwb->lwb_itxs, itx);
		zil_itx_destroy(itx);
	}

	while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
		mutex_enter(&zcw->zcw_lock);

		ASSERT(list_link_active(&zcw->zcw_node));
		list_remove(&lwb->lwb_waiters, zcw);

		ASSERT3P(zcw->zcw_lwb, ==, lwb);
		zcw->zcw_lwb = NULL;

		zcw->zcw_zio_error = zio->io_error;

		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
		zcw->zcw_done = B_TRUE;
		cv_broadcast(&zcw->zcw_cv);

		mutex_exit(&zcw->zcw_lock);
	}

	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * This is called when an lwb write completes. This means this specific
 * lwb was written to disk, and all dependent lwbs have also been
 * written to disk.
 *
 * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to
 * the VDEVs involved in writing out this specific lwb. The lwb will be
 * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the
 * zio completion callback for the lwb's root zio.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	spa_t *spa = zio->io_spa;
	zilog_t *zilog = lwb->lwb_zilog;
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;

	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(BP_GET_FILL(zio->io_bp) == 0);

	abd_put(zio->io_abd);

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);

	mutex_enter(&zilog->zl_lock);
	lwb->lwb_write_zio = NULL;
	lwb->lwb_fastwrite = FALSE;
	mutex_exit(&zilog->zl_lock);

	if (avl_numnodes(t) == 0)
		return;

	/*
	 * If there was an IO error, we're not going to call zio_flush()
	 * on these vdevs, so we simply empty the tree and free the
	 * nodes. We avoid calling zio_flush() since there isn't any
	 * good reason for doing so, after the lwb block failed to be
	 * written out.
	 */
	if (zio->io_error != 0) {
		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(zv, sizeof (*zv));
		return;
	}

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(lwb->lwb_root_zio, vd);
		kmem_free(zv, sizeof (*zv));
	}
}

/*
 * This function's purpose is to "open" an lwb such that it is ready to
 * accept new itxs being committed to it. To do this, the lwb's zio
 * structures are created, and linked to the lwb. This function is
 * idempotent; if the passed in lwb has already been opened, this
 * function is essentially a no-op.
 */
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_phys_t zb;
	zio_priority_t prio;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT3P(lwb, !=, NULL);
	EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
	EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
	mutex_enter(&zilog->zl_lock);
	if (lwb->lwb_root_zio == NULL) {
		abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
		    BP_GET_LSIZE(&lwb->lwb_blk));

		if (!lwb->lwb_fastwrite) {
			metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
			lwb->lwb_fastwrite = 1;
		}

		if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
			prio = ZIO_PRIORITY_SYNC_WRITE;
		else
			prio = ZIO_PRIORITY_ASYNC_WRITE;

		lwb->lwb_root_zio = zio_root(zilog->zl_spa,
		    zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
		ASSERT3P(lwb->lwb_root_zio, !=, NULL);

		lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
		    zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
		    BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
		    prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
		    ZIO_FLAG_FASTWRITE, &zb);
		ASSERT3P(lwb->lwb_write_zio, !=, NULL);

		lwb->lwb_state = LWB_STATE_OPENED;

		/*
		 * The zilog's "zl_last_lwb_opened" field is used to
		 * build the lwb/zio dependency chain, which is used to
		 * preserve the ordering of lwb completions that is
		 * required by the semantics of the ZIL. Each new lwb
		 * zio becomes a parent of the "previous" lwb zio, such
		 * that the new lwb's zio cannot complete until the
		 * "previous" lwb's zio completes.
		 *
		 * This is required by the semantics of zil_commit();
		 * the commit waiters attached to the lwbs will be woken
		 * in the lwb zio's completion callback, so this zio
		 * dependency graph ensures the waiters are woken in the
		 * correct order (the same order the lwbs were created).
		 */
		lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
		if (last_lwb_opened != NULL &&
		    last_lwb_opened->lwb_state != LWB_STATE_DONE) {
			ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
			    last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
			ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
			zio_add_child(lwb->lwb_root_zio,
			    last_lwb_opened->lwb_root_zio);
		}
		zilog->zl_last_lwb_opened = lwb;
	}
	mutex_exit(&zilog->zl_lock);

	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}

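/*
 * Summarizing the lifecycle seen in this file, an lwb moves through
 * the following states: LWB_STATE_CLOSED (zil_alloc_lwb()) ->
 * LWB_STATE_OPENED (zil_lwb_write_open(), zios created) ->
 * LWB_STATE_ISSUED (zil_lwb_write_issue(), write handed to the zio
 * pipeline) -> LWB_STATE_DONE (zil_lwb_flush_vdevs_done(), contents
 * stable and waiters signalled).
 */
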
/*
 * Define a limited set of intent log block sizes.
 *
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* data base */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};

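/*
 * A rough worked example of the selection done in zil_lwb_write_issue()
 * below: if zl_cur_used is ~6KB, then zil_blksz (6KB plus
 * sizeof (zil_chain_t)) overflows the 4096 bucket and lands in the
 * 8192+4096 (12KB) bucket. That suggestion is recorded in zl_prev_blks,
 * and the maximum over the recent history is used, so a single small
 * commit between large ones does not shrink the chosen block size.
 */
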
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;
	boolean_t slog;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */

	tx = dmu_tx_create(zilog->zl_os);

	/*
	 * Since we are not going to create any new dirty data and we can even
	 * help with clearing the existing dirty data, we should not be subject
	 * to the dirty data based delays.
	 * We (ab)use TXG_WAITED to bypass the delay mechanism.
	 * One side effect from using TXG_WAITED is that dmu_tx_assign() can
	 * fail if the pool is suspended. Those are dramatic circumstances,
	 * so we return NULL to signal that the normal ZIL processing is not
	 * possible and txg_wait_synced() should be used to ensure that the data
	 * is on disk.
	 */
	error = dmu_tx_assign(tx, TXG_WAITED);
	if (error != 0) {
		ASSERT(error == EIO || error == ERESTART);
		dmu_tx_abort(tx);
		return (NULL);
	}
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
	if (slog) {
		ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
		ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
	} else {
		ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
		ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
	}
	if (error == 0) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write block (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_write_zio, wsz);

	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_pad = 0;
	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);

	zil_lwb_add_block(lwb, &lwb->lwb_blk);
	lwb->lwb_issued_timestamp = gethrtime();
	lwb->lwb_state = LWB_STATE_ISSUED;

	zio_nowait(lwb->lwb_root_zio);
	zio_nowait(lwb->lwb_write_zio);

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrcb, *lrc;
	lr_write_t *lrwb, *lrw;
	char *lr_buf;
	uint64_t dlen, dnow, lwb_sp, reclen, txg;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(lwb->lwb_buf, !=, NULL);

	zil_lwb_write_open(zilog, lwb);

	lrc = &itx->itx_lr;
	lrw = (lr_write_t *)lrc;

	/*
	 * A commit itx doesn't represent any on-disk state; instead
	 * it's simply used as a place holder on the commit list, and
	 * provides a mechanism for attaching a "commit waiter" onto the
	 * correct lwb (such that the waiter can be signalled upon
	 * completion of that lwb). Thus, we don't process this itx's
	 * log record if it's a commit itx (these itx's don't have log
	 * records), and instead link the itx's waiter onto the lwb's
	 * list of waiters.
	 *
	 * For more details, see the comment above zil_commit().
	 */
	if (lrc->lrc_txtype == TX_COMMIT) {
		mutex_enter(&zilog->zl_lock);
		zil_commit_waiter_link_lwb(itx->itx_private, lwb);
		itx->itx_private = NULL;
		mutex_exit(&zilog->zl_lock);
		return (lwb);
	}

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);
	} else {
		dlen = 0;
	}
	reclen = lrc->lrc_reclen;
	zilog->zl_cur_used += (reclen + dlen);
	txg = lrc->lrc_txg;

	ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));

cont:
	/*
	 * If this record won't fit in the current log block, start a new one.
	 * For WR_NEED_COPY optimize layout for minimal number of chunks.
	 */
	lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
	    lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
	    lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
		lwb = zil_lwb_write_issue(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_open(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
		ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
	}

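	/*
	 * Illustrative note: for a WR_NEED_COPY record that doesn't fit,
	 * only dnow = MIN(dlen, lwb_sp - reclen) bytes of data are
	 * written into this lwb (along with a copy of the record
	 * header); the "goto cont" at the bottom of this function then
	 * loops back here with dlen reduced by dnow, spilling the
	 * remainder into one or more new lwbs.
	 */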
	dnow = MIN(dlen, lwb_sp - reclen);
	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrcb = (lr_t *)lr_buf;		/* Like lrc, but inside lwb. */
	lrwb = (lr_write_t *)lrcb;	/* Like lrw, but inside lwb. */

	ZIL_STAT_BUMP(zil_itx_count);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state == WR_COPIED) {
			ZIL_STAT_BUMP(zil_itx_copied_count);
			ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
		} else {
			char *dbuf;
			int error;

			if (itx->itx_wr_state == WR_NEED_COPY) {
				dbuf = lr_buf + reclen;
				lrcb->lrc_reclen += dnow;
				if (lrwb->lr_length > dnow)
					lrwb->lr_length = dnow;
				lrw->lr_offset += dnow;
				lrw->lr_length -= dnow;
				ZIL_STAT_BUMP(zil_itx_needcopy_count);
				ZIL_STAT_INCR(zil_itx_needcopy_bytes,
				    lrw->lr_length);
			} else {
				ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
				dbuf = NULL;
				ZIL_STAT_BUMP(zil_itx_indirect_count);
				ZIL_STAT_INCR(zil_itx_indirect_bytes,
				    lrw->lr_length);
			}

			/*
			 * We pass in the "lwb_write_zio" rather than
			 * "lwb_root_zio" so that the "lwb_write_zio"
			 * becomes the parent of any zio's created by
			 * the "zl_get_data" callback. The vdevs are
			 * flushed after the "lwb_write_zio" completes,
			 * so we want to make sure that completion
			 * callback waits for these additional zio's,
			 * such that the vdevs used by those zio's will
			 * be included in the lwb's vdev tree, and those
			 * vdevs will be properly flushed. If we passed
			 * in "lwb_root_zio" here, then these additional
			 * vdevs may not be flushed; e.g. if these zio's
			 * completed after "lwb_write_zio" completed.
			 */
			error = zilog->zl_get_data(itx->itx_private,
			    lrwb, dbuf, lwb, lwb->lwb_write_zio);

			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error != 0) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number. Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrcb->lrc_seq = ++zilog->zl_lr_seq;
	lwb->lwb_nused += reclen + dnow;

	zil_lwb_add_txg(lwb, txg);

	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));

	dlen -= dnow;
	if (dlen > 0) {
		zilog->zl_cur_used += reclen;
		goto cont;
	}

	return (lwb);
}

itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	size_t itxsize;
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
	itxsize = offsetof(itx_t, itx_lr) + lrsize;

	itx = zio_data_buf_alloc(itxsize);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */
	itx->itx_sync = B_TRUE;		/* default is synchronous */
	itx->itx_callback = NULL;
	itx->itx_callback_data = NULL;
	itx->itx_size = itxsize;

	return (itx);
}

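/*
 * Usage sketch (hypothetical values, for illustration; the real itx
 * builders live in zfs_log.c):
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	lr_write_t *lr = (lr_write_t *)&itx->itx_lr;
 *	lr->lr_foid = foid;
 *	lr->lr_offset = off;
 *	lr->lr_length = len;
 *	itx->itx_wr_state = WR_NEED_COPY;
 *	zil_itx_assign(zilog, itx, tx);
 *
 * For WR_COPIED records the data is appended to the log record itself,
 * so the lrsize passed to zil_itx_create() must also cover the data.
 */
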
void
zil_itx_destroy(itx_t *itx)
{
	IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
	IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);

	if (itx->itx_callback != NULL)
		itx->itx_callback(itx->itx_callback_data);

	zio_data_buf_free(itx, itx->itx_size);
}

/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
	itx_t *itx;
	list_t *list;
	avl_tree_t *t;
	void *cookie;
	itx_async_node_t *ian;

	list = &itxs->i_sync_list;
	while ((itx = list_head(list)) != NULL) {
		/*
		 * In the general case, commit itxs will not be found
		 * here, as they'll be committed to an lwb via
		 * zil_lwb_commit(), and free'd in that function. Having
		 * said that, it is still possible for commit itxs to be
		 * found here, due to the following race:
		 *
		 *	- a thread calls zil_commit() which assigns the
		 *	  commit itx to a per-txg i_sync_list
		 *	- zil_itxg_clean() is called (e.g. via spa_sync())
		 *	  while the waiter is still on the i_sync_list
		 *
		 * There's nothing to prevent syncing the txg while the
		 * waiter is on the i_sync_list. This normally doesn't
		 * happen because spa_sync() is slower than zil_commit(),
		 * but if zil_commit() calls txg_wait_synced() (e.g.
		 * because zil_create() or zil_commit_writer_stall() is
		 * called) we will hit this case.
		 */
		if (itx->itx_lr.lrc_txtype == TX_COMMIT)
			zil_commit_waiter_skip(itx->itx_private);

		list_remove(list, itx);
		zil_itx_destroy(itx);
	}

	cookie = NULL;
	t = &itxs->i_async_tree;
	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
		list = &ian->ia_list;
		while ((itx = list_head(list)) != NULL) {
			list_remove(list, itx);
			/* commit itxs should never be on the async lists. */
			ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
			zil_itx_destroy(itx);
		}
		list_destroy(list);
		kmem_free(ian, sizeof (itx_async_node_t));
	}
	avl_destroy(t);

	kmem_free(itxs, sizeof (itxs_t));
}

static int
zil_aitx_compare(const void *x1, const void *x2)
{
	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

	return (AVL_CMP(o1, o2));
}

/*
 * Remove all async itxs with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;
	list_t clean_list;
	itx_t *itx;

	ASSERT(oid != 0);
	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * Locate the object node and append its list.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		ian = avl_find(t, &oid, &where);
		if (ian != NULL)
			list_move_tail(&clean_list, &ian->ia_list);
		mutex_exit(&itxg->itxg_lock);
	}
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		/* commit itxs should never be on the async lists. */
		ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
		zil_itx_destroy(itx);
	}
	list_destroy(&clean_list);
}

void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t txg;
	itxg_t *itxg;
	itxs_t *itxs, *clean = NULL;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if an fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
			    "txg %llu", itxg->itxg_txg);
			clean = itxg->itxg_itxs;
		}
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
		    KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid =
		    LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t),
			    KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);

	/*
	 * We don't want to dirty the ZIL using ZILTEST_TXG, because
	 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
	 * need to be careful to always dirty the ZIL using the "real"
	 * TXG (not itxg_txg) even when the SPA is frozen.
	 */
	zilog_dirty(zilog, dmu_tx_get_txg(tx));
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. txg has been committed) so that we
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
	itxs_t *clean_me;

	ASSERT3U(synced_txg, <, ZILTEST_TXG);

	mutex_enter(&itxg->itxg_lock);
	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
		mutex_exit(&itxg->itxg_lock);
		return;
	}
	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
	ASSERT3U(itxg->itxg_txg, !=, 0);
	clean_me = itxg->itxg_itxs;
	itxg->itxg_itxs = NULL;
	itxg->itxg_txg = 0;
	mutex_exit(&itxg->itxg_lock);
	/*
	 * Preferably start a task queue to free up the old itxs but
	 * if taskq_dispatch can't allocate resources to do that then
	 * free it in-line. This should be rare. Note, using TQ_SLEEP
	 * created a bad performance problem.
	 */
	ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
	ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
	taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP);
	if (id == TASKQID_INVALID)
		zil_itxg_clean(clean_me);
}

/*
 * This function will traverse the queue of itxs that need to be
 * committed, and move them onto the ZIL's zl_itx_commit_list.
 */
static void
zil_get_commit_list(zilog_t *zilog)
{
	uint64_t otxg, txg;
	list_t *commit_list = &zilog->zl_itx_commit_list;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing. That's okay since we'll
	 * only commit things in the future.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If we're adding itx records to the zl_itx_commit_list,
		 * then the zil better be dirty in this "txg". We can assert
		 * that here since we're holding the itxg_lock which will
		 * prevent spa_sync from cleaning it. Once we add the itxs
		 * to the zl_itx_commit_list we must commit it to disk even
		 * if it's unnecessary (i.e. the txg was synced).
		 */
		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);

		mutex_exit(&itxg->itxg_lock);
	}
}

/*
 * Move the async itxs for a specified object to commit into sync lists.
 */
static void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If a foid is specified then find that node and append its
		 * list. Otherwise walk the tree appending all the lists
		 * to the sync list. We add to the end rather than the
		 * beginning to ensure the create has happened.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		if (foid != 0) {
			ian = avl_find(t, &foid, &where);
			if (ian != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
			}
		} else {
			void *cookie = NULL;

			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
				list_destroy(&ian->ia_list);
				kmem_free(ian, sizeof (itx_async_node_t));
			}
		}
		mutex_exit(&itxg->itxg_lock);
	}
}

/*
 * This function will prune commit itxs that are at the head of the
 * commit list (it won't prune past the first non-commit itx), and
 * either: a) attach them to the last lwb that's still pending
 * completion, or b) skip them altogether.
 *
 * This is used as a performance optimization to prevent commit itxs
 * from generating new lwbs when it's unnecessary to do so.
 */
static void
zil_prune_commit_list(zilog_t *zilog)
{
	itx_t *itx;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));

	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
		lr_t *lrc = &itx->itx_lr;
		if (lrc->lrc_txtype != TX_COMMIT)
			break;

		mutex_enter(&zilog->zl_lock);

		lwb_t *last_lwb = zilog->zl_last_lwb_opened;
		if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) {
			/*
			 * All of the itxs this waiter was waiting on
			 * must have already completed (or there were
			 * never any itx's for it to wait on), so it's
			 * safe to skip this waiter and mark it done.
			 */
			zil_commit_waiter_skip(itx->itx_private);
		} else {
			zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
			itx->itx_private = NULL;
		}

		mutex_exit(&zilog->zl_lock);

		list_remove(&zilog->zl_itx_commit_list, itx);
		zil_itx_destroy(itx);
	}

	IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}

static void
zil_commit_writer_stall(zilog_t *zilog)
{
	/*
	 * When zio_alloc_zil() fails to allocate the next lwb block on
	 * disk, we must call txg_wait_synced() to ensure all of the
	 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
	 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
	 * to zil_process_commit_list()) will have to call zil_create(),
	 * and start a new ZIL chain.
	 *
	 * Since zio_alloc_zil() failed, the lwb that was previously
	 * issued does not have a pointer to the "next" lwb on disk.
	 * Thus, if another ZIL writer thread was to allocate the "next"
	 * on-disk lwb, that block could be leaked in the event of a
	 * crash (because the previous lwb on-disk would not point to
	 * it).
	 *
	 * We must hold the zilog's zl_issuer_lock while we do this, to
	 * ensure no new threads enter zil_process_commit_list() until
	 * all lwb's in the zl_lwb_list have been synced and freed
	 * (which is achieved via the txg_wait_synced() call).
	 */
	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	txg_wait_synced(zilog->zl_dmu_pool, 0);
	ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}

/*
 * This function will traverse the commit list, creating new lwbs as
 * needed, and committing the itxs from the commit list to these newly
 * created lwbs. Additionally, as a new lwb is created, the previous
 * lwb will be issued to the zio layer to be written to disk.
 */
static void
zil_process_commit_list(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	list_t nolwb_itxs;
	list_t nolwb_waiters;
	lwb_t *lwb;
	itx_t *itx;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));

	/*
	 * Return if there's nothing to commit before we dirty the fs by
	 * calling zil_create().
	 */
	if (list_head(&zilog->zl_itx_commit_list) == NULL)
		return;

	list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
	list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
	    offsetof(zil_commit_waiter_t, zcw_node));

	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		lwb = zil_create(zilog);
	} else {
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
	}

	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
		lr_t *lrc = &itx->itx_lr;
		uint64_t txg = lrc->lrc_txg;

		ASSERT3U(txg, !=, 0);

		if (lrc->lrc_txtype == TX_COMMIT) {
			DTRACE_PROBE2(zil__process__commit__itx,
			    zilog_t *, zilog, itx_t *, itx);
		} else {
			DTRACE_PROBE2(zil__process__normal__itx,
			    zilog_t *, zilog, itx_t *, itx);
		}

		list_remove(&zilog->zl_itx_commit_list, itx);

		boolean_t synced = txg <= spa_last_synced_txg(spa);
		boolean_t frozen = txg > spa_freeze_txg(spa);

		/*
		 * If the txg of this itx has already been synced out, then
		 * we don't need to commit this itx to an lwb. This is
		 * because the data of this itx will have already been
		 * written to the main pool. This is inherently racy, and
		 * it's still ok to commit an itx whose txg has already
		 * been synced; this will result in a write that's
		 * unnecessary, but will do no harm.
		 *
		 * With that said, we always want to commit TX_COMMIT itxs
		 * to an lwb, regardless of whether or not that itx's txg
		 * has been synced out. We do this to ensure any OPENED lwb
		 * will always have at least one zil_commit_waiter_t linked
		 * to the lwb.
		 *
		 * As a counter-example, if we skipped TX_COMMIT itx's
		 * whose txg had already been synced, the following
		 * situation could occur if we happened to be racing with
		 * spa_sync:
		 *
		 * 1. We commit a non-TX_COMMIT itx to an lwb, where the
		 *    itx's txg is 10 and the last synced txg is 9.
		 * 2. spa_sync finishes syncing out txg 10.
		 * 3. We move to the next itx in the list, it's a TX_COMMIT
		 *    whose txg is 10, so we skip it rather than committing
		 *    it to the lwb used in (1).
		 *
		 * If the itx that is skipped in (3) is the last TX_COMMIT
		 * itx in the commit list, then it's possible for the lwb
		 * used in (1) to remain in the OPENED state indefinitely.
		 *
		 * To prevent the above scenario from occurring, ensuring
		 * that once an lwb is OPENED it will transition to ISSUED
		 * and eventually DONE, we always commit TX_COMMIT itx's to
		 * an lwb here, even if that itx's txg has already been
		 * synced.
		 *
		 * Finally, if the pool is frozen, we _always_ commit the
		 * itx. The point of freezing the pool is to prevent data
		 * from being written to the main pool via spa_sync, and
		 * instead rely solely on the ZIL to persistently store the
		 * data; i.e. when the pool is frozen, the last synced txg
		 * value can't be trusted.
		 */
		if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
			if (lwb != NULL) {
				lwb = zil_lwb_commit(zilog, itx, lwb);

				if (lwb == NULL)
					list_insert_tail(&nolwb_itxs, itx);
				else
					list_insert_tail(&lwb->lwb_itxs, itx);
			} else {
				if (lrc->lrc_txtype == TX_COMMIT) {
					zil_commit_waiter_link_nolwb(
					    itx->itx_private, &nolwb_waiters);
				}

				list_insert_tail(&nolwb_itxs, itx);
			}
		} else {
			ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
			zil_itx_destroy(itx);
		}
	}

	if (lwb == NULL) {
		/*
		 * This indicates zio_alloc_zil() failed to allocate the
		 * "next" lwb on-disk. When this happens, we must stall
		 * the ZIL write pipeline; see the comment within
		 * zil_commit_writer_stall() for more details.
		 */
		zil_commit_writer_stall(zilog);

		/*
		 * Additionally, we have to signal and mark the "nolwb"
		 * waiters as "done" here, since without an lwb, we
		 * can't do this via zil_lwb_flush_vdevs_done() like
		 * normal.
		 */
		zil_commit_waiter_t *zcw;
		while ((zcw = list_head(&nolwb_waiters)) != NULL) {
			zil_commit_waiter_skip(zcw);
			list_remove(&nolwb_waiters, zcw);
		}

		/*
		 * And finally, we have to destroy the itx's that
		 * couldn't be committed to an lwb; this will also call
		 * the itx's callback if one exists for the itx.
		 */
		while ((itx = list_head(&nolwb_itxs)) != NULL) {
			list_remove(&nolwb_itxs, itx);
			zil_itx_destroy(itx);
		}
	} else {
		ASSERT(list_is_empty(&nolwb_waiters));
		ASSERT3P(lwb, !=, NULL);
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);

		/*
		 * At this point, the ZIL block pointed at by the "lwb"
		 * variable is in one of the following states: "closed"
		 * or "open".
		 *
		 * If it's "closed", then no itxs have been committed to
		 * it, so there's no point in issuing its zio (i.e. it's
		 * "empty").
		 *
		 * If it's "open", then it contains one or more itxs that
		 * eventually need to be committed to stable storage. In
		 * this case we intentionally do not issue the lwb's zio
		 * to disk yet, and instead rely on one of the following
		 * two mechanisms for issuing the zio:
		 *
		 * 1. Ideally, there will be more ZIL activity occurring
		 *    on the system, such that this function will be
		 *    immediately called again (not necessarily by the same
		 *    thread) and this lwb's zio will be issued via
		 *    zil_lwb_commit(). This way, the lwb is guaranteed to
		 *    be "full" when it is issued to disk, and we'll make
		 *    use of the lwb's size the best we can.
		 *
		 * 2. If there isn't sufficient ZIL activity occurring on
		 *    the system, such that this lwb's zio isn't issued via
		 *    zil_lwb_commit(), zil_commit_waiter() will issue the
		 *    lwb's zio. If this occurs, the lwb is not guaranteed
		 *    to be "full" by the time its zio is issued, and means
		 *    the size of the lwb was "too large" given the amount
		 *    of ZIL activity occurring on the system at that time.
		 *
		 * We do this for a couple of reasons:
		 *
		 * 1. To try and reduce the number of IOPs needed to
		 *    write the same number of itxs. If an lwb has space
		 *    available in its buffer for more itxs, and more itxs
		 *    will be committed relatively soon (relative to the
		 *    latency of performing a write), then it's beneficial
		 *    to wait for these "next" itxs. This way, more itxs
		 *    can be committed to stable storage with fewer writes.
		 *
		 * 2. To try and use the largest lwb block size that the
		 *    incoming rate of itxs can support. Again, this is to
		 *    try and pack as many itxs into as few lwbs as
		 *    possible, without significantly impacting the latency
		 *    of each individual itx.
		 */
	}
}

2197 | * This function is responsible for ensuring the passed in commit waiter | |
2198 | * (and associated commit itx) is committed to an lwb. If the waiter is | |
2199 | * not already committed to an lwb, all itxs in the zilog's queue of | |
2200 | * itxs will be processed. The assumption is the passed in waiter's | |
2201 | * commit itx will found in the queue just like the other non-commit | |
2202 | * itxs, such that when the entire queue is processed, the waiter will | |
2fe61a7e | 2203 | * have been committed to an lwb. |
1ce23dca PS |
2204 | * |
2205 | * The lwb associated with the passed in waiter is not guaranteed to | |
2206 | * have been issued by the time this function completes. If the lwb is | |
2207 | * not issued, we rely on future calls to zil_commit_writer() to issue | |
2208 | * the lwb, or the timeout mechanism found in zil_commit_waiter(). | |
2209 | */ | |
2210 | static void | |
2211 | zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
2212 | { | |
2213 | ASSERT(!MUTEX_HELD(&zilog->zl_lock)); | |
2214 | ASSERT(spa_writeable(zilog->zl_spa)); | |
1ce23dca | 2215 | |
1b2b0aca | 2216 | mutex_enter(&zilog->zl_issuer_lock); |
1ce23dca PS |
2217 | |
2218 | if (zcw->zcw_lwb != NULL || zcw->zcw_done) { | |
2219 | /* | |
2220 | * It's possible that, while we were waiting to acquire | |
1b2b0aca | 2221 | * the "zl_issuer_lock", another thread committed this |
1ce23dca PS |
2222 | * waiter to an lwb. If that occurs, we bail out early, |
2223 | * without processing any of the zilog's queue of itxs. | |
2224 | * | |
2225 | * On certain workloads and system configurations, the | |
1b2b0aca | 2226 | * "zl_issuer_lock" can become highly contended. In an |
1ce23dca PS |
2227 | * attempt to reduce this contention, we immediately drop |
2228 | * the lock if the waiter has already been processed. | |
2229 | * | |
2230 | * We've measured this optimization to reduce CPU spent | |
2231 | * contending on this lock by up to 5%, using a system | |
2232 | * with 32 CPUs, low latency storage (~50 usec writes), | |
2233 | * and 1024 threads performing sync writes. | |
2234 | */ | |
2235 | goto out; | |
2236 | } | |
2237 | ||
2238 | ZIL_STAT_BUMP(zil_commit_writer_count); | |
2239 | ||
2240 | zil_get_commit_list(zilog); | |
2241 | zil_prune_commit_list(zilog); | |
2242 | zil_process_commit_list(zilog); | |
2243 | ||
2244 | out: | |
1b2b0aca | 2245 | mutex_exit(&zilog->zl_issuer_lock); |
1ce23dca PS |
2246 | } |
2247 | ||
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);

	lwb_t *lwb = zcw->zcw_lwb;
	ASSERT3P(lwb, !=, NULL);
	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);

	/*
	 * If the lwb has already been issued by another thread, we can
	 * immediately return since there's no work to be done (the
	 * point of this function is to issue the lwb). Additionally, we
	 * do this prior to acquiring the zl_issuer_lock, to avoid
	 * acquiring it when it's not necessary to do so.
	 */
	if (lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_DONE)
		return;

	/*
	 * In order to call zil_lwb_write_issue() we must hold the
	 * zilog's "zl_issuer_lock". We can't simply acquire that lock,
	 * since we're already holding the commit waiter's "zcw_lock",
	 * and those two locks are acquired in the opposite order
	 * elsewhere.
	 */
	mutex_exit(&zcw->zcw_lock);
	mutex_enter(&zilog->zl_issuer_lock);
	mutex_enter(&zcw->zcw_lock);

	/*
	 * Since we just dropped and re-acquired the commit waiter's
	 * lock, we have to re-check to see if the waiter was marked
	 * "done" during that process. If the waiter was marked "done",
	 * the "lwb" pointer is no longer valid (it can be free'd after
	 * the waiter is marked "done"), so without this check we could
	 * wind up with a use-after-free error below.
	 */
	if (zcw->zcw_done)
		goto out;

	ASSERT3P(lwb, ==, zcw->zcw_lwb);

	/*
	 * We've already checked this above, but since we hadn't acquired
	 * the zilog's zl_issuer_lock, we have to perform this check a
	 * second time while holding the lock.
	 *
	 * We don't need to hold the zl_lock since the lwb cannot transition
	 * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
	 * _can_ transition from ISSUED to DONE, but it's OK to race with
	 * that transition since we treat the lwb the same, whether it's in
	 * the ISSUED or DONE states.
	 *
	 * The important thing, is we treat the lwb differently depending on
	 * if it's ISSUED or OPENED, and block any other threads that might
	 * attempt to issue this lwb. For that reason we hold the
	 * zl_issuer_lock when checking the lwb_state; we must not call
	 * zil_lwb_write_issue() if the lwb had already been issued.
	 *
	 * See the comment above the lwb_state_t structure definition for
	 * more details on the lwb states, and locking requirements.
	 */
	if (lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_DONE)
		goto out;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);

	/*
	 * As described in the comments above zil_commit_waiter() and
	 * zil_process_commit_list(), we need to issue this lwb's zio
	 * since we've reached the commit waiter's timeout and it still
	 * hasn't been issued.
	 */
	lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);

	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);

	/*
	 * Since the lwb's zio hadn't been issued by the time this thread
	 * reached its timeout, we reset the zilog's "zl_cur_used" field
	 * to influence the zil block size selection algorithm.
	 *
	 * By having to issue the lwb's zio here, it means the size of the
	 * lwb was too large, given the incoming throughput of itxs. By
	 * setting "zl_cur_used" to zero, we communicate this fact to the
	 * block size selection algorithm, so it can take this information
	 * into account, and potentially select a smaller size for the
	 * next lwb block that is allocated.
	 */
	zilog->zl_cur_used = 0;

	if (nlwb == NULL) {
		/*
		 * When zil_lwb_write_issue() returns NULL, this
		 * indicates zio_alloc_zil() failed to allocate the
		 * "next" lwb on-disk. When this occurs, the ZIL write
		 * pipeline must be stalled; see the comment within the
		 * zil_commit_writer_stall() function for more details.
		 *
		 * We must drop the commit waiter's lock prior to
		 * calling zil_commit_writer_stall() or else we can wind
		 * up with the following deadlock:
		 *
		 * - This thread is waiting for the txg to sync while
		 *   holding the waiter's lock; txg_wait_synced() is
		 *   used within zil_commit_writer_stall().
		 *
		 * - The txg can't sync because it is waiting for this
		 *   lwb's zio callback to call dmu_tx_commit().
		 *
		 * - The lwb's zio callback can't call dmu_tx_commit()
		 *   because it's blocked trying to acquire the waiter's
		 *   lock, which occurs prior to calling dmu_tx_commit().
		 */
		mutex_exit(&zcw->zcw_lock);
		zil_commit_writer_stall(zilog);
		mutex_enter(&zcw->zcw_lock);
	}

out:
	mutex_exit(&zilog->zl_issuer_lock);
	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}

/*
 * This function is responsible for performing the following two tasks:
 *
 * 1. its primary responsibility is to block until the given "commit
 *    waiter" is considered "done".
 *
 * 2. its secondary responsibility is to issue the zio for the lwb that
 *    the given "commit waiter" is waiting on, if this function has
 *    waited "long enough" and the lwb is still in the "open" state.
 *
 * Given a sufficient amount of itxs being generated and written using
 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
 * function. If this does not occur, this secondary responsibility will
 * ensure the lwb is issued even if there is no other synchronous
 * activity on the system.
 *
 * For more details, see zil_process_commit_list(); more specifically,
 * the comment at the bottom of that function.
 */
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT(spa_writeable(zilog->zl_spa));

	mutex_enter(&zcw->zcw_lock);

	/*
	 * The timeout is scaled based on the lwb latency to avoid
	 * significantly impacting the latency of each individual itx.
	 * For more details, see the comment at the bottom of the
	 * zil_process_commit_list() function.
	 */
	int pct = MAX(zfs_commit_timeout_pct, 1);
	hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
	hrtime_t wakeup = gethrtime() + sleep;
	boolean_t timedout = B_FALSE;

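	/*
	 * For example (illustrative numbers, assuming the default
	 * zfs_commit_timeout_pct of 5): if the previous lwb took 1 ms
	 * to complete, the waiter below sleeps at most 50 usec before
	 * it takes over and issues the lwb's zio itself.
	 */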
	while (!zcw->zcw_done) {
		ASSERT(MUTEX_HELD(&zcw->zcw_lock));

		lwb_t *lwb = zcw->zcw_lwb;

		/*
		 * Usually, the waiter will have a non-NULL lwb field here,
		 * but it's possible for it to be NULL as a result of
		 * zil_commit() racing with spa_sync().
		 *
		 * When zil_clean() is called, it's possible for the itxg
		 * list (which may be cleaned via a taskq) to contain
		 * commit itxs. When this occurs, the commit waiters linked
		 * off of these commit itxs will not be committed to an
		 * lwb. Additionally, these commit waiters will not be
		 * marked done until zil_commit_waiter_skip() is called via
		 * zil_itxg_clean().
		 *
		 * Thus, it's possible for this commit waiter (i.e. the
		 * "zcw" variable) to be found in this "in between" state;
		 * where its "zcw_lwb" field is NULL, and it hasn't yet
		 * been skipped, so its "zcw_done" field is still B_FALSE.
		 */
		IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);

		if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
			ASSERT3B(timedout, ==, B_FALSE);

			/*
			 * If the lwb hasn't been issued yet, then we
			 * need to wait with a timeout, in case this
			 * function needs to issue the lwb after the
			 * timeout is reached; responsibility (2) from
			 * the comment above this function.
			 */
			clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv,
			    &zcw->zcw_lock, wakeup, USEC2NSEC(1),
			    CALLOUT_FLAG_ABSOLUTE);

			if (timeleft >= 0 || zcw->zcw_done)
				continue;

			timedout = B_TRUE;
			zil_commit_waiter_timeout(zilog, zcw);

			if (!zcw->zcw_done) {
				/*
				 * If the commit waiter has already been
				 * marked "done", it's possible for the
				 * waiter's lwb structure to have already
				 * been freed. Thus, we can only reliably
				 * make these assertions if the waiter
				 * isn't done.
				 */
				ASSERT3P(lwb, ==, zcw->zcw_lwb);
				ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
			}
		} else {
			/*
			 * If the lwb isn't open, then it must have already
			 * been issued. In that case, there's no need to
			 * use a timeout when waiting for the lwb to
			 * complete.
			 *
			 * Additionally, if the lwb is NULL, the waiter
			 * will soon be signaled and marked done via
			 * zil_clean() and zil_itxg_clean(), so no timeout
			 * is required.
			 */

			IMPLY(lwb != NULL,
			    lwb->lwb_state == LWB_STATE_ISSUED ||
			    lwb->lwb_state == LWB_STATE_DONE);
			cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
		}
	}

	mutex_exit(&zcw->zcw_lock);
}

static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
	zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);

	cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
	list_link_init(&zcw->zcw_node);
	zcw->zcw_lwb = NULL;
	zcw->zcw_done = B_FALSE;
	zcw->zcw_zio_error = 0;

	return (zcw);
}

static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3B(zcw->zcw_done, ==, B_TRUE);
	mutex_destroy(&zcw->zcw_lock);
	cv_destroy(&zcw->zcw_cv);
	kmem_cache_free(zil_zcw_cache, zcw);
}

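/*
 * Lifecycle sketch (for illustration): zil_commit_impl() allocates a
 * waiter, attaches it to a TX_COMMIT itx via zil_commit_itx_assign(),
 * processes the itx queue via zil_commit_writer(), blocks in
 * zil_commit_waiter() until the waiter is marked done, and finally
 * calls zil_free_commit_waiter().
 */
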
/*
 * This function is used to create a TX_COMMIT itx and assign it. This
 * way, it will be linked into the ZIL's list of synchronous itxs, and
 * then later committed to an lwb (or skipped) when
 * zil_process_commit_list() is called.
 */
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
	itx->itx_sync = B_TRUE;
	itx->itx_private = zcw;

	zil_itx_assign(zilog, itx, tx);

	dmu_tx_commit(tx);
}

/*
 * Commit ZFS Intent Log transactions (itxs) to stable storage.
 *
 * When writing ZIL transactions to the on-disk representation of the
 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
 * itxs can be committed to a single lwb. Once a lwb is written and
 * committed to stable storage (i.e. the lwb is written, and vdevs have
 * been flushed), each itx that was committed to that lwb is also
 * considered to be committed to stable storage.
 *
 * When an itx is committed to an lwb, the log record (lr_t) contained
 * by the itx is copied into the lwb's zio buffer, and once this buffer
 * is written to disk, it becomes an on-disk ZIL block.
 *
 * As itxs are generated, they're inserted into the ZIL's queue of
 * uncommitted itxs. The semantics of zil_commit() are such that it will
 * block until all itxs that were in the queue when it was called, are
 * committed to stable storage.
 *
 * If "foid" is zero, this means all "synchronous" and "asynchronous"
 * itxs, for all objects in the dataset, will be committed to stable
 * storage prior to zil_commit() returning. If "foid" is non-zero, all
 * "synchronous" itxs for all objects, but only "asynchronous" itxs
 * that correspond to the foid passed in, will be committed to stable
 * storage prior to zil_commit() returning.
 *
 * Generally speaking, when zil_commit() is called, the consumer doesn't
 * actually care about _all_ of the uncommitted itxs. Instead, they're
 * simply trying to wait for a specific itx to be committed to disk,
 * but the interface(s) for interacting with the ZIL don't allow such
 * fine-grained communication. A better interface would allow a consumer
 * to create and assign an itx, and then pass a reference to this itx to
 * zil_commit(); such that zil_commit() would return as soon as that
 * specific itx was committed to disk (instead of waiting for _all_
 * itxs to be committed).
 *
 * When a thread calls zil_commit() a special "commit itx" will be
 * generated, along with a corresponding "waiter" for this commit itx.
 * zil_commit() will wait on this waiter's CV, such that when the waiter
 * is marked done, and signaled, zil_commit() will return.
 *
 * This commit itx is inserted into the queue of uncommitted itxs. This
 * provides an easy mechanism for determining which itxs were in the
 * queue prior to zil_commit() having been called, and which itxs were
 * added after zil_commit() was called.
 *
 * The commit itx is special; it doesn't have any on-disk representation.
 * When a commit itx is "committed" to an lwb, the waiter associated
 * with it is linked onto the lwb's list of waiters. Then, when that lwb
 * completes, each waiter on the lwb's list is marked done and signaled
 * -- allowing the thread waiting on the waiter to return from zil_commit().
 *
 * It's important to point out a few critical factors that allow us
 * to make use of the commit itxs, commit waiters, per-lwb lists of
 * commit waiters, and zio completion callbacks like we're doing:
 *
 *   1. The list of waiters for each lwb is traversed, and each commit
 *      waiter is marked "done" and signaled, in the zio completion
 *      callback of the lwb's zio[*].
 *
 *      * Actually, the waiters are signaled in the zio completion
 *        callback of the root zio for the DKIOCFLUSHWRITECACHE commands
 *        that are sent to the vdevs upon completion of the lwb zio.
 *
 *   2. When the itxs are inserted into the ZIL's queue of uncommitted
 *      itxs, the order in which they are inserted is preserved[*]; as
 *      itxs are added to the queue, they are added to the tail of
 *      in-memory linked lists.
 *
 *      When committing the itxs to lwbs (to be written to disk), they
 *      are committed in the same order in which the itxs were added to
 *      the uncommitted queue's linked list(s); i.e. the linked list of
 *      itxs to commit is traversed from head to tail, and each itx is
 *      committed to an lwb in that order.
 *
 *      * To clarify:
 *
 *        - the order of "sync" itxs is preserved w.r.t. other
 *          "sync" itxs, regardless of the corresponding objects.
 *        - the order of "async" itxs is preserved w.r.t. other
 *          "async" itxs corresponding to the same object.
 *        - the order of "async" itxs is *not* preserved w.r.t. other
 *          "async" itxs corresponding to different objects.
 *        - the order of "sync" itxs w.r.t. "async" itxs (or vice
 *          versa) is *not* preserved, even for itxs that correspond
 *          to the same object.
 *
 *      For more details, see: zil_itx_assign(), zil_async_to_sync(),
 *      zil_get_commit_list(), and zil_process_commit_list().
 *
 *   3. The lwbs represent a linked list of blocks on disk. Thus, any
 *      lwb cannot be considered committed to stable storage, until its
 *      "previous" lwb is also committed to stable storage. This fact,
 *      coupled with the fact described above, means that itxs are
 *      committed in (roughly) the order in which they were generated.
 *      This is essential because itxs are dependent on prior itxs.
 *      Thus, we *must not* deem an itx as being committed to stable
 *      storage, until *all* prior itxs have also been committed to
 *      stable storage.
 *
 *      To enforce this ordering of lwb zio's, while still leveraging as
 *      much of the underlying storage performance as possible, we rely
 *      on two fundamental concepts:
 *
 *          1. The creation and issuance of lwb zio's is protected by
 *             the zilog's "zl_issuer_lock", which ensures only a single
 *             thread is creating and/or issuing lwb's at a time
 *          2. The "previous" lwb is a child of the "current" lwb
 *             (leveraging the zio parent-child dependency graph)
 *
 *      By relying on this parent-child zio relationship, we can have
 *      many lwb zio's concurrently issued to the underlying storage,
 *      but the order in which they complete will be the same order in
 *      which they were created.
 */
2658 | void | |
572e2857 | 2659 | zil_commit(zilog_t *zilog, uint64_t foid) |
34dc7c2f | 2660 | { |
1ce23dca PS |
2661 | /* |
2662 | * We should never attempt to call zil_commit on a snapshot for | |
2663 | * a couple of reasons: | |
2664 | * | |
2665 | * 1. A snapshot may never be modified, thus it cannot have any | |
2666 | * in-flight itxs that would have modified the dataset. | |
2667 | * | |
2668 | * 2. By design, when zil_commit() is called, a commit itx will | |
2669 | * be assigned to this zilog; as a result, the zilog will be | |
2670 | * dirtied. We must not dirty the zilog of a snapshot; there | |
2671 | * are checks in the code that enforce this invariant, and | |
2672 | * they will cause a panic if it's not upheld. | |
2673 | */ | |
2674 | ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); | |
34dc7c2f | 2675 | |
572e2857 BB |
2676 | if (zilog->zl_sync == ZFS_SYNC_DISABLED) |
2677 | return; | |
34dc7c2f | 2678 | |
1ce23dca PS |
2679 | if (!spa_writeable(zilog->zl_spa)) { |
2680 | /* | |
2681 | * If the SPA is not writable, there should never be any | |
2682 | * pending itxs waiting to be committed to disk. If that | |
2683 | * weren't true, we'd skip writing those itxs out, and | |
2fe61a7e | 2684 | * would break the semantics of zil_commit(); thus, we're |
1ce23dca PS |
2685 | * verifying that truth before we return to the caller. |
2686 | */ | |
2687 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); | |
2688 | ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); | |
2689 | for (int i = 0; i < TXG_SIZE; i++) | |
2690 | ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); | |
2691 | return; | |
2692 | } | |
2693 | ||
2694 | /* | |
2695 | * If the ZIL is suspended, we don't want to dirty it by calling | |
2696 | * zil_commit_itx_assign() below, nor can we write out | |
2697 | * lwbs as would be done in zil_commit_write(). Thus, we | |
2698 | * simply rely on txg_wait_synced() to maintain the necessary | |
2699 | * semantics, and avoid calling those functions altogether. | |
2700 | */ | |
2701 | if (zilog->zl_suspend > 0) { | |
2702 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
2703 | return; | |
2704 | } | |
2705 | ||
2fe61a7e PS |
2706 | zil_commit_impl(zilog, foid); |
2707 | } | |
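
/*
 * Illustrative userland sketch (not part of this file): the
 * guarantees above are what make a plain write-then-fsync sequence
 * durable on ZFS, since the fsync(2) below blocks until the
 * corresponding zil_commit() for the file's dataset has completed.
 * The function name is hypothetical.
 *
 *	#include <unistd.h>
 *
 *	int
 *	append_durably(int fd, const void *buf, size_t len)
 *	{
 *		if (write(fd, buf, len) != (ssize_t)len)
 *			return (-1);
 *		return (fsync(fd));
 *	}
 */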
2708 | ||
2709 | void | |
2710 | zil_commit_impl(zilog_t *zilog, uint64_t foid) | |
2711 | { | |
b6ad9671 ED |
2712 | ZIL_STAT_BUMP(zil_commit_count); |
2713 | ||
1ce23dca PS |
2714 | /* |
2715 | * Move the "async" itxs for the specified foid to the "sync" | |
2716 | * queues, such that they will be later committed (or skipped) | |
2717 | * to an lwb when zil_process_commit_list() is called. | |
2718 | * | |
2719 | * Since these "async" itxs must be committed prior to this | |
2720 | * call to zil_commit returning, we must perform this operation | |
2721 | * before we call zil_commit_itx_assign(). | |
2722 | */ | |
572e2857 | 2723 | zil_async_to_sync(zilog, foid); |
34dc7c2f | 2724 | |
1ce23dca PS |
2725 | /* |
2726 | * We allocate a new "waiter" structure which will initially be | |
2727 | * linked to the commit itx using the itx's "itx_private" field. | |
2728 | * Since the commit itx doesn't represent any on-disk state, | |
2729 | * when it's committed to an lwb, rather than copying its | |
2730 | * lr_t into the lwb's buffer, the commit itx's "waiter" will be | |
2731 | * added to the lwb's list of waiters. Then, when the lwb is | |
2732 | * committed to stable storage, each waiter in the lwb's list of | |
2733 | * waiters will be marked "done", and signaled. | |
2734 | * | |
2735 | * We must create the waiter and assign the commit itx prior to | |
2736 | * calling zil_commit_writer(), or else our specific commit itx | |
2737 | * is not guaranteed to be committed to an lwb prior to calling | |
2738 | * zil_commit_waiter(). | |
2739 | */ | |
2740 | zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); | |
2741 | zil_commit_itx_assign(zilog, zcw); | |
428870ff | 2742 | |
1ce23dca PS |
2743 | zil_commit_writer(zilog, zcw); |
2744 | zil_commit_waiter(zilog, zcw); | |
428870ff | 2745 | |
1ce23dca PS |
2746 | if (zcw->zcw_zio_error != 0) { |
2747 | /* | |
2748 | * If there was an error writing out the ZIL blocks that | |
2749 | * this thread is waiting on, then we fall back to | |
2750 | * relying on spa_sync() to write out the data this | |
2751 | * thread is waiting on. Obviously this has performance | |
2752 | * implications, but the expectation is that this will be | |
2753 | * an exceptional case that shouldn't occur often. | |
2754 | */ | |
2755 | DTRACE_PROBE2(zil__commit__io__error, | |
2756 | zilog_t *, zilog, zil_commit_waiter_t *, zcw); | |
2757 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
2758 | } | |
8c0712fd | 2759 | |
1ce23dca | 2760 | zil_free_commit_waiter(zcw); |
428870ff BB |
2761 | } |
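
/*
 * The waiter handshake used above is the classic mutex/condvar
 * completion pattern. A minimal standalone sketch of that pattern,
 * written as userland pthreads purely as an analogy (the kernel
 * code uses its own primitives and the commit waiter's own lock,
 * cv, and done flag):
 *
 *	#include <pthread.h>
 *
 *	typedef struct waiter {
 *		pthread_mutex_t w_lock;
 *		pthread_cond_t w_cv;
 *		int w_done;
 *	} waiter_t;
 *
 *	void
 *	waiter_wait(waiter_t *w)
 *	{
 *		pthread_mutex_lock(&w->w_lock);
 *		while (!w->w_done)
 *			pthread_cond_wait(&w->w_cv, &w->w_lock);
 *		pthread_mutex_unlock(&w->w_lock);
 *	}
 *
 *	void
 *	waiter_done(waiter_t *w)
 *	{
 *		pthread_mutex_lock(&w->w_lock);
 *		w->w_done = 1;
 *		pthread_cond_broadcast(&w->w_cv);
 *		pthread_mutex_unlock(&w->w_lock);
 *	}
 *
 * Here zil_commit_waiter() plays the role of waiter_wait(), and the
 * lwb's zio completion callback plays the role of waiter_done().
 */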
2762 | ||
34dc7c2f BB |
2763 | /* |
2764 | * Called in syncing context to free committed log blocks and update the log header. | |
2765 | */ | |
2766 | void | |
2767 | zil_sync(zilog_t *zilog, dmu_tx_t *tx) | |
2768 | { | |
2769 | zil_header_t *zh = zil_header_in_syncing_context(zilog); | |
2770 | uint64_t txg = dmu_tx_get_txg(tx); | |
2771 | spa_t *spa = zilog->zl_spa; | |
428870ff | 2772 | uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; |
34dc7c2f BB |
2773 | lwb_t *lwb; |
2774 | ||
9babb374 BB |
2775 | /* |
2776 | * We don't zero out zl_destroy_txg, so make sure we don't try | |
2777 | * to destroy it twice. | |
2778 | */ | |
2779 | if (spa_sync_pass(spa) != 1) | |
2780 | return; | |
2781 | ||
34dc7c2f BB |
2782 | mutex_enter(&zilog->zl_lock); |
2783 | ||
2784 | ASSERT(zilog->zl_stop_sync == 0); | |
2785 | ||
428870ff BB |
2786 | if (*replayed_seq != 0) { |
2787 | ASSERT(zh->zh_replay_seq < *replayed_seq); | |
2788 | zh->zh_replay_seq = *replayed_seq; | |
2789 | *replayed_seq = 0; | |
2790 | } | |
34dc7c2f BB |
2791 | |
2792 | if (zilog->zl_destroy_txg == txg) { | |
2793 | blkptr_t blk = zh->zh_log; | |
2794 | ||
2795 | ASSERT(list_head(&zilog->zl_lwb_list) == NULL); | |
34dc7c2f BB |
2796 | |
2797 | bzero(zh, sizeof (zil_header_t)); | |
fb5f0bc8 | 2798 | bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); |
34dc7c2f BB |
2799 | |
2800 | if (zilog->zl_keep_first) { | |
2801 | /* | |
2802 | * If this block was part of a log chain that couldn't | |
2803 | * be claimed because a device was missing during | |
2804 | * zil_claim(), but that device later returns, | |
2805 | * then this block could erroneously appear valid. | |
2806 | * To guard against this, assign a new GUID to the new | |
2807 | * log chain so it doesn't matter what blk points to. | |
2808 | */ | |
2809 | zil_init_log_chain(zilog, &blk); | |
2810 | zh->zh_log = blk; | |
2811 | } | |
2812 | } | |
2813 | ||
9babb374 | 2814 | while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { |
34dc7c2f BB |
2815 | zh->zh_log = lwb->lwb_blk; |
2816 | if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) | |
2817 | break; | |
2818 | list_remove(&zilog->zl_lwb_list, lwb); | |
1ce23dca PS |
2819 | zio_free(spa, txg, &lwb->lwb_blk); |
2820 | zil_free_lwb(zilog, lwb); | |
34dc7c2f BB |
2821 | |
2822 | /* | |
2823 | * If we don't have anything left in the lwb list then | |
2824 | * we've had an allocation failure and we need to zero | |
2825 | * out the zil_header blkptr so that we don't end | |
2826 | * up freeing the same block twice. | |
2827 | */ | |
2828 | if (list_head(&zilog->zl_lwb_list) == NULL) | |
2829 | BP_ZERO(&zh->zh_log); | |
2830 | } | |
920dd524 ED |
2831 | |
2832 | /* | |
2833 | * Remove fastwrite on any blocks that have been pre-allocated for | |
2834 | * the next commit. This prevents fastwrite counter pollution by | |
2835 | * unused, long-lived LWBs. | |
2836 | */ | |
2837 | for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) { | |
1ce23dca | 2838 | if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) { |
920dd524 ED |
2839 | metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); |
2840 | lwb->lwb_fastwrite = 0; | |
2841 | } | |
2842 | } | |
2843 | ||
34dc7c2f BB |
2844 | mutex_exit(&zilog->zl_lock); |
2845 | } | |
2846 | ||
1ce23dca PS |
2847 | /* ARGSUSED */ |
2848 | static int | |
2849 | zil_lwb_cons(void *vbuf, void *unused, int kmflag) | |
2850 | { | |
2851 | lwb_t *lwb = vbuf; | |
2852 | list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); | |
2853 | list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), | |
2854 | offsetof(zil_commit_waiter_t, zcw_node)); | |
2855 | avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, | |
2856 | sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); | |
2857 | mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); | |
2858 | return (0); | |
2859 | } | |
2860 | ||
2861 | /* ARGSUSED */ | |
2862 | static void | |
2863 | zil_lwb_dest(void *vbuf, void *unused) | |
2864 | { | |
2865 | lwb_t *lwb = vbuf; | |
2866 | mutex_destroy(&lwb->lwb_vdev_lock); | |
2867 | avl_destroy(&lwb->lwb_vdev_tree); | |
2868 | list_destroy(&lwb->lwb_waiters); | |
2869 | list_destroy(&lwb->lwb_itxs); | |
2870 | } | |
2871 | ||
34dc7c2f BB |
2872 | void |
2873 | zil_init(void) | |
2874 | { | |
2875 | zil_lwb_cache = kmem_cache_create("zil_lwb_cache", | |
1ce23dca PS |
2876 | sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); |
2877 | ||
2878 | zil_zcw_cache = kmem_cache_create("zil_zcw_cache", | |
2879 | sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); | |
b6ad9671 ED |
2880 | |
2881 | zil_ksp = kstat_create("zfs", 0, "zil", "misc", | |
d1d7e268 | 2882 | KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), |
b6ad9671 ED |
2883 | KSTAT_FLAG_VIRTUAL); |
2884 | ||
2885 | if (zil_ksp != NULL) { | |
2886 | zil_ksp->ks_data = &zil_stats; | |
2887 | kstat_install(zil_ksp); | |
2888 | } | |
34dc7c2f BB |
2889 | } |
2890 | ||
2891 | void | |
2892 | zil_fini(void) | |
2893 | { | |
1ce23dca | 2894 | kmem_cache_destroy(zil_zcw_cache); |
34dc7c2f | 2895 | kmem_cache_destroy(zil_lwb_cache); |
b6ad9671 ED |
2896 | |
2897 | if (zil_ksp != NULL) { | |
2898 | kstat_delete(zil_ksp); | |
2899 | zil_ksp = NULL; | |
2900 | } | |
34dc7c2f BB |
2901 | } |
2902 | ||
428870ff BB |
2903 | void |
2904 | zil_set_sync(zilog_t *zilog, uint64_t sync) | |
2905 | { | |
2906 | zilog->zl_sync = sync; | |
2907 | } | |
2908 | ||
2909 | void | |
2910 | zil_set_logbias(zilog_t *zilog, uint64_t logbias) | |
2911 | { | |
2912 | zilog->zl_logbias = logbias; | |
2913 | } | |
2914 | ||
34dc7c2f BB |
2915 | zilog_t * |
2916 | zil_alloc(objset_t *os, zil_header_t *zh_phys) | |
2917 | { | |
2918 | zilog_t *zilog; | |
2919 | ||
79c76d5b | 2920 | zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); |
34dc7c2f BB |
2921 | |
2922 | zilog->zl_header = zh_phys; | |
2923 | zilog->zl_os = os; | |
2924 | zilog->zl_spa = dmu_objset_spa(os); | |
2925 | zilog->zl_dmu_pool = dmu_objset_pool(os); | |
2926 | zilog->zl_destroy_txg = TXG_INITIAL - 1; | |
428870ff BB |
2927 | zilog->zl_logbias = dmu_objset_logbias(os); |
2928 | zilog->zl_sync = dmu_objset_syncprop(os); | |
1ce23dca PS |
2929 | zilog->zl_dirty_max_txg = 0; |
2930 | zilog->zl_last_lwb_opened = NULL; | |
2931 | zilog->zl_last_lwb_latency = 0; | |
34dc7c2f BB |
2932 | |
2933 | mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); | |
1b2b0aca | 2934 | mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f | 2935 | |
1c27024e | 2936 | for (int i = 0; i < TXG_SIZE; i++) { |
572e2857 BB |
2937 | mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, |
2938 | MUTEX_DEFAULT, NULL); | |
2939 | } | |
34dc7c2f BB |
2940 | |
2941 | list_create(&zilog->zl_lwb_list, sizeof (lwb_t), | |
2942 | offsetof(lwb_t, lwb_node)); | |
2943 | ||
572e2857 BB |
2944 | list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), |
2945 | offsetof(itx_t, itx_node)); | |
2946 | ||
34dc7c2f BB |
2947 | cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); |
2948 | ||
2949 | return (zilog); | |
2950 | } | |
2951 | ||
2952 | void | |
2953 | zil_free(zilog_t *zilog) | |
2954 | { | |
d6320ddb | 2955 | int i; |
34dc7c2f BB |
2956 | |
2957 | zilog->zl_stop_sync = 1; | |
2958 | ||
13fe0198 MA |
2959 | ASSERT0(zilog->zl_suspend); |
2960 | ASSERT0(zilog->zl_suspending); | |
2961 | ||
3e31d2b0 | 2962 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
34dc7c2f BB |
2963 | list_destroy(&zilog->zl_lwb_list); |
2964 | ||
572e2857 BB |
2965 | ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); |
2966 | list_destroy(&zilog->zl_itx_commit_list); | |
2967 | ||
d6320ddb | 2968 | for (i = 0; i < TXG_SIZE; i++) { |
572e2857 BB |
2969 | /* |
2970 | * It's possible for an itx to be generated that doesn't dirty | |
2971 | * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() | |
2972 | * callback to remove the entry. We remove those here. | |
2973 | * | |
2974 | * Also free up the ziltest itxs. | |
2975 | */ | |
2976 | if (zilog->zl_itxg[i].itxg_itxs) | |
2977 | zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); | |
2978 | mutex_destroy(&zilog->zl_itxg[i].itxg_lock); | |
2979 | } | |
2980 | ||
1b2b0aca | 2981 | mutex_destroy(&zilog->zl_issuer_lock); |
34dc7c2f BB |
2982 | mutex_destroy(&zilog->zl_lock); |
2983 | ||
34dc7c2f BB |
2984 | cv_destroy(&zilog->zl_cv_suspend); |
2985 | ||
2986 | kmem_free(zilog, sizeof (zilog_t)); | |
2987 | } | |
2988 | ||
34dc7c2f BB |
2989 | /* |
2990 | * Open an intent log. | |
2991 | */ | |
2992 | zilog_t * | |
2993 | zil_open(objset_t *os, zil_get_data_t *get_data) | |
2994 | { | |
2995 | zilog_t *zilog = dmu_objset_zil(os); | |
2996 | ||
1ce23dca PS |
2997 | ASSERT3P(zilog->zl_get_data, ==, NULL); |
2998 | ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); | |
3e31d2b0 ES |
2999 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
3000 | ||
34dc7c2f | 3001 | zilog->zl_get_data = get_data; |
34dc7c2f BB |
3002 | |
3003 | return (zilog); | |
3004 | } | |
3005 | ||
3006 | /* | |
3007 | * Close an intent log. | |
3008 | */ | |
3009 | void | |
3010 | zil_close(zilog_t *zilog) | |
3011 | { | |
3e31d2b0 | 3012 | lwb_t *lwb; |
1ce23dca | 3013 | uint64_t txg; |
572e2857 | 3014 | |
1ce23dca PS |
3015 | if (!dmu_objset_is_snapshot(zilog->zl_os)) { |
3016 | zil_commit(zilog, 0); | |
3017 | } else { | |
3018 | ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); | |
3019 | ASSERT0(zilog->zl_dirty_max_txg); | |
3020 | ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); | |
3021 | } | |
572e2857 | 3022 | |
572e2857 | 3023 | mutex_enter(&zilog->zl_lock); |
3e31d2b0 | 3024 | lwb = list_tail(&zilog->zl_lwb_list); |
1ce23dca PS |
3025 | if (lwb == NULL) |
3026 | txg = zilog->zl_dirty_max_txg; | |
3027 | else | |
3028 | txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); | |
572e2857 | 3029 | mutex_exit(&zilog->zl_lock); |
1ce23dca PS |
3030 | |
3031 | /* | |
3032 | * We need to use txg_wait_synced() to wait long enough for the | |
3033 | * ZIL to be clean, and to wait for all pending lwbs to be | |
3034 | * written out. | |
3035 | */ | |
3036 | if (txg != 0) | |
34dc7c2f | 3037 | txg_wait_synced(zilog->zl_dmu_pool, txg); |
55922e73 GW |
3038 | |
3039 | if (zilog_is_dirty(zilog)) | |
3040 | zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg); | |
50c957f7 | 3041 | if (txg < spa_freeze_txg(zilog->zl_spa)) |
55922e73 | 3042 | VERIFY(!zilog_is_dirty(zilog)); |
34dc7c2f | 3043 | |
34dc7c2f | 3044 | zilog->zl_get_data = NULL; |
3e31d2b0 ES |
3045 | |
3046 | /* | |
1ce23dca | 3047 | * We should have only one lwb left on the list; remove it now. |
3e31d2b0 ES |
3048 | */ |
3049 | mutex_enter(&zilog->zl_lock); | |
3050 | lwb = list_head(&zilog->zl_lwb_list); | |
3051 | if (lwb != NULL) { | |
1ce23dca PS |
3052 | ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); |
3053 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); | |
3054 | ||
920dd524 ED |
3055 | if (lwb->lwb_fastwrite) |
3056 | metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); | |
1ce23dca | 3057 | |
3e31d2b0 ES |
3058 | list_remove(&zilog->zl_lwb_list, lwb); |
3059 | zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); | |
1ce23dca | 3060 | zil_free_lwb(zilog, lwb); |
3e31d2b0 ES |
3061 | } |
3062 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f BB |
3063 | } |
3064 | ||
13fe0198 MA |
3065 | static char *suspend_tag = "zil suspending"; |
3066 | ||
34dc7c2f BB |
3067 | /* |
3068 | * Suspend an intent log. While in suspended mode, we still honor | |
3069 | * synchronous semantics, but we rely on txg_wait_synced() to do it. | |
13fe0198 MA |
3070 | * On old version pools, we suspend the log briefly when taking a |
3071 | * snapshot so that it will have an empty intent log. | |
3072 | * | |
3073 | * Long holds are not really intended to be used the way we do here -- | |
3074 | * held for such a short time. A concurrent caller of dsl_dataset_long_held() | |
3075 | * could fail. Therefore we take pains to only put a long hold if it is | |
3076 | * actually necessary. Fortunately, it will only be necessary if the | |
3077 | * objset is currently mounted (or the ZVOL equivalent). In that case it | |
3078 | * will already have a long hold, so we are not really making things any worse. | |
3079 | * | |
3080 | * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or | |
3081 | * zvol_state_t), and use their mechanism to prevent their hold from being | |
3082 | * dropped (e.g. VFS_HOLD()). However, that would be even more pain for | |
3083 | * very little gain. | |
3084 | * | |
3085 | * If cookiep == NULL, this does both the suspend & resume. | |
3086 | * Otherwise, it returns with the dataset "long held", and the cookie | |
3087 | * should be passed into zil_resume(). | |
34dc7c2f BB |
3088 | */ |
3089 | int | |
13fe0198 | 3090 | zil_suspend(const char *osname, void **cookiep) |
34dc7c2f | 3091 | { |
13fe0198 MA |
3092 | objset_t *os; |
3093 | zilog_t *zilog; | |
3094 | const zil_header_t *zh; | |
3095 | int error; | |
3096 | ||
3097 | error = dmu_objset_hold(osname, suspend_tag, &os); | |
3098 | if (error != 0) | |
3099 | return (error); | |
3100 | zilog = dmu_objset_zil(os); | |
34dc7c2f BB |
3101 | |
3102 | mutex_enter(&zilog->zl_lock); | |
13fe0198 MA |
3103 | zh = zilog->zl_header; |
3104 | ||
9babb374 | 3105 | if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ |
34dc7c2f | 3106 | mutex_exit(&zilog->zl_lock); |
13fe0198 | 3107 | dmu_objset_rele(os, suspend_tag); |
2e528b49 | 3108 | return (SET_ERROR(EBUSY)); |
34dc7c2f | 3109 | } |
13fe0198 MA |
3110 | |
3111 | /* | |
3112 | * Don't put a long hold in the cases where we can avoid it. This | |
3113 | * is when there is no cookie so we are doing a suspend & resume | |
3114 | * (i.e. called from zil_vdev_offline()), and there's nothing to do | |
3115 | * for the suspend because it's already suspended, or there's no ZIL. | |
3116 | */ | |
3117 | if (cookiep == NULL && !zilog->zl_suspending && | |
3118 | (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { | |
3119 | mutex_exit(&zilog->zl_lock); | |
3120 | dmu_objset_rele(os, suspend_tag); | |
3121 | return (0); | |
3122 | } | |
3123 | ||
3124 | dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); | |
3125 | dsl_pool_rele(dmu_objset_pool(os), suspend_tag); | |
3126 | ||
3127 | zilog->zl_suspend++; | |
3128 | ||
3129 | if (zilog->zl_suspend > 1) { | |
34dc7c2f | 3130 | /* |
13fe0198 | 3131 | * Someone else is already suspending it. |
34dc7c2f BB |
3132 | * Just wait for them to finish. |
3133 | */ | |
13fe0198 | 3134 | |
34dc7c2f BB |
3135 | while (zilog->zl_suspending) |
3136 | cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); | |
34dc7c2f | 3137 | mutex_exit(&zilog->zl_lock); |
13fe0198 MA |
3138 | |
3139 | if (cookiep == NULL) | |
3140 | zil_resume(os); | |
3141 | else | |
3142 | *cookiep = os; | |
3143 | return (0); | |
3144 | } | |
3145 | ||
3146 | /* | |
3147 | * If there is no pointer to an on-disk block, this ZIL must not | |
3148 | * be active (e.g. filesystem not mounted), so there's nothing | |
3149 | * to clean up. | |
3150 | */ | |
3151 | if (BP_IS_HOLE(&zh->zh_log)) { | |
3152 | ASSERT(cookiep != NULL); /* fast path already handled */ | |
3153 | ||
3154 | *cookiep = os; | |
3155 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f BB |
3156 | return (0); |
3157 | } | |
13fe0198 | 3158 | |
4807c0ba TC |
3159 | /* |
3160 | * The ZIL has work to do. Ensure that the associated encryption | |
3161 | * key will remain mapped while we are committing the log by | |
3162 | * grabbing a reference to it. If the key isn't loaded we have no | |
3163 | * choice but to return an error until the wrapping key is loaded. | |
3164 | */ | |
3165 | if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa, | |
3166 | dmu_objset_ds(os), FTAG) != 0) { | |
3167 | zilog->zl_suspend--; | |
3168 | mutex_exit(&zilog->zl_lock); | |
3169 | dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); | |
3170 | dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); | |
3171 | return (SET_ERROR(EBUSY)); | |
3172 | } | |
3173 | ||
34dc7c2f BB |
3174 | zilog->zl_suspending = B_TRUE; |
3175 | mutex_exit(&zilog->zl_lock); | |
3176 | ||
2fe61a7e PS |
3177 | /* |
3178 | * We need to use zil_commit_impl to ensure we wait for all | |
3179 | * LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed | |
3180 | * to disk before proceeding. If we used zil_commit instead, it | |
3181 | * would just call txg_wait_synced(), because zl_suspend is set. | |
3182 | * txg_wait_synced() doesn't wait for these lwb's to be | |
3183 | * LWB_STATE_DONE before returning. | |
3184 | */ | |
3185 | zil_commit_impl(zilog, 0); | |
3186 | ||
3187 | /* | |
3188 | * Now that we've ensured all lwb's are LWB_STATE_DONE, we use | |
3189 | * txg_wait_synced() to ensure the data from the zilog has | |
3190 | * migrated to the main pool before calling zil_destroy(). | |
3191 | */ | |
3192 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
34dc7c2f BB |
3193 | |
3194 | zil_destroy(zilog, B_FALSE); | |
3195 | ||
3196 | mutex_enter(&zilog->zl_lock); | |
3197 | zilog->zl_suspending = B_FALSE; | |
3198 | cv_broadcast(&zilog->zl_cv_suspend); | |
3199 | mutex_exit(&zilog->zl_lock); | |
3200 | ||
4807c0ba TC |
3201 | if (os->os_encrypted) { |
3202 | /* | |
3203 | * Encrypted datasets need to wait for all data to be | |
3204 | * synced out before removing the mapping. | |
3205 | * | |
3206 | * XXX: Depending on the number of datasets with | |
3207 | * outstanding ZIL data on a given log device, this | |
3208 | * might cause spa_offline_log() to take a long time. | |
3209 | */ | |
3210 | txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); | |
3211 | VERIFY0(spa_keystore_remove_mapping(os->os_spa, | |
3212 | dmu_objset_id(os), FTAG)); | |
3213 | } | |
3214 | ||
13fe0198 MA |
3215 | if (cookiep == NULL) |
3216 | zil_resume(os); | |
3217 | else | |
3218 | *cookiep = os; | |
34dc7c2f BB |
3219 | return (0); |
3220 | } | |
3221 | ||
3222 | void | |
13fe0198 | 3223 | zil_resume(void *cookie) |
34dc7c2f | 3224 | { |
13fe0198 MA |
3225 | objset_t *os = cookie; |
3226 | zilog_t *zilog = dmu_objset_zil(os); | |
3227 | ||
34dc7c2f BB |
3228 | mutex_enter(&zilog->zl_lock); |
3229 | ASSERT(zilog->zl_suspend != 0); | |
3230 | zilog->zl_suspend--; | |
3231 | mutex_exit(&zilog->zl_lock); | |
13fe0198 MA |
3232 | dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); |
3233 | dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); | |
34dc7c2f BB |
3234 | } |
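
/*
 * Typical usage of the suspend/resume pair, following the contract
 * documented above zil_suspend() (the dataset name below is
 * hypothetical):
 *
 *	void *cookie;
 *	int error = zil_suspend("pool/fs", &cookie);
 *
 *	if (error == 0) {
 *		... work while the ZIL is suspended and empty ...
 *		zil_resume(cookie);
 *	}
 *
 * Passing cookiep == NULL instead performs both the suspend and the
 * resume in a single call, as zil_vdev_offline() does below.
 */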
3235 | ||
3236 | typedef struct zil_replay_arg { | |
867959b5 | 3237 | zil_replay_func_t **zr_replay; |
34dc7c2f | 3238 | void *zr_arg; |
34dc7c2f | 3239 | boolean_t zr_byteswap; |
428870ff | 3240 | char *zr_lr; |
34dc7c2f BB |
3241 | } zil_replay_arg_t; |
3242 | ||
428870ff BB |
3243 | static int |
3244 | zil_replay_error(zilog_t *zilog, lr_t *lr, int error) | |
3245 | { | |
eca7b760 | 3246 | char name[ZFS_MAX_DATASET_NAME_LEN]; |
428870ff BB |
3247 | |
3248 | zilog->zl_replaying_seq--; /* didn't actually replay this one */ | |
3249 | ||
3250 | dmu_objset_name(zilog->zl_os, name); | |
3251 | ||
3252 | cmn_err(CE_WARN, "ZFS replay transaction error %d, " | |
3253 | "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, | |
3254 | (u_longlong_t)lr->lrc_seq, | |
3255 | (u_longlong_t)(lr->lrc_txtype & ~TX_CI), | |
3256 | (lr->lrc_txtype & TX_CI) ? "CI" : ""); | |
3257 | ||
3258 | return (error); | |
3259 | } | |
3260 | ||
3261 | static int | |
34dc7c2f BB |
3262 | zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg) |
3263 | { | |
3264 | zil_replay_arg_t *zr = zra; | |
3265 | const zil_header_t *zh = zilog->zl_header; | |
3266 | uint64_t reclen = lr->lrc_reclen; | |
3267 | uint64_t txtype = lr->lrc_txtype; | |
428870ff | 3268 | int error = 0; |
34dc7c2f | 3269 | |
428870ff | 3270 | zilog->zl_replaying_seq = lr->lrc_seq; |
34dc7c2f BB |
3271 | |
3272 | if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ | |
428870ff BB |
3273 | return (0); |
3274 | ||
3275 | if (lr->lrc_txg < claim_txg) /* already committed */ | |
3276 | return (0); | |
34dc7c2f BB |
3277 | |
3278 | /* Strip case-insensitive bit, still present in log record */ | |
3279 | txtype &= ~TX_CI; | |
3280 | ||
428870ff BB |
3281 | if (txtype == 0 || txtype >= TX_MAX_TYPE) |
3282 | return (zil_replay_error(zilog, lr, EINVAL)); | |
3283 | ||
3284 | /* | |
3285 | * If this record type can be logged out of order, the object | |
3286 | * (lr_foid) may no longer exist. That's legitimate, not an error. | |
3287 | */ | |
3288 | if (TX_OOO(txtype)) { | |
3289 | error = dmu_object_info(zilog->zl_os, | |
50c957f7 | 3290 | LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL); |
428870ff BB |
3291 | if (error == ENOENT || error == EEXIST) |
3292 | return (0); | |
fb5f0bc8 BB |
3293 | } |
3294 | ||
34dc7c2f BB |
3295 | /* |
3296 | * Make a copy of the data so we can revise and extend it. | |
3297 | */ | |
428870ff BB |
3298 | bcopy(lr, zr->zr_lr, reclen); |
3299 | ||
3300 | /* | |
3301 | * If this is a TX_WRITE with a blkptr, suck in the data. | |
3302 | */ | |
3303 | if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { | |
3304 | error = zil_read_log_data(zilog, (lr_write_t *)lr, | |
3305 | zr->zr_lr + reclen); | |
13fe0198 | 3306 | if (error != 0) |
428870ff BB |
3307 | return (zil_replay_error(zilog, lr, error)); |
3308 | } | |
34dc7c2f BB |
3309 | |
3310 | /* | |
3311 | * The log block containing this lr may have been byteswapped | |
3312 | * so that we can easily examine common fields like lrc_txtype. | |
428870ff | 3313 | * However, the log is a mix of different record types, and only the |
34dc7c2f BB |
3314 | * replay vectors know how to byteswap their records. Therefore, if |
3315 | * the lr was byteswapped, undo it before invoking the replay vector. | |
3316 | */ | |
3317 | if (zr->zr_byteswap) | |
428870ff | 3318 | byteswap_uint64_array(zr->zr_lr, reclen); |
34dc7c2f BB |
3319 | |
3320 | /* | |
3321 | * We must now do two things atomically: replay this log record, | |
fb5f0bc8 BB |
3322 | * and update the log header sequence number to reflect the fact that |
3323 | * we did so. At the end of each replay function the sequence number | |
3324 | * is updated if we are in replay mode. | |
34dc7c2f | 3325 | */ |
428870ff | 3326 | error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); |
13fe0198 | 3327 | if (error != 0) { |
34dc7c2f BB |
3328 | /* |
3329 | * The DMU's dnode layer doesn't see removes until the txg | |
3330 | * commits, so a subsequent claim can spuriously fail with | |
fb5f0bc8 | 3331 | * EEXIST. So if we receive any error, we try syncing out | |
428870ff BB |
3332 | * any removes and then retry the transaction. Note that we | |
3333 | * specify B_FALSE for byteswap now, so we don't do it twice. | |
34dc7c2f | 3334 | */ |
428870ff BB |
3335 | txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); |
3336 | error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); | |
13fe0198 | 3337 | if (error != 0) |
428870ff | 3338 | return (zil_replay_error(zilog, lr, error)); |
34dc7c2f | 3339 | } |
428870ff | 3340 | return (0); |
34dc7c2f BB |
3341 | } |
3342 | ||
3343 | /* ARGSUSED */ | |
428870ff | 3344 | static int |
34dc7c2f BB |
3345 | zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) |
3346 | { | |
3347 | zilog->zl_replay_blks++; | |
428870ff BB |
3348 | |
3349 | return (0); | |
34dc7c2f BB |
3350 | } |
3351 | ||
3352 | /* | |
3353 | * If this dataset has a non-empty intent log, replay it and destroy it. | |
3354 | */ | |
3355 | void | |
867959b5 | 3356 | zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE]) |
34dc7c2f BB |
3357 | { |
3358 | zilog_t *zilog = dmu_objset_zil(os); | |
3359 | const zil_header_t *zh = zilog->zl_header; | |
3360 | zil_replay_arg_t zr; | |
3361 | ||
9babb374 | 3362 | if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { |
34dc7c2f BB |
3363 | zil_destroy(zilog, B_TRUE); |
3364 | return; | |
3365 | } | |
3366 | ||
34dc7c2f BB |
3367 | zr.zr_replay = replay_func; |
3368 | zr.zr_arg = arg; | |
34dc7c2f | 3369 | zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); |
79c76d5b | 3370 | zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); |
34dc7c2f BB |
3371 | |
3372 | /* | |
3373 | * Wait for in-progress removes to sync before starting replay. | |
3374 | */ | |
3375 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
3376 | ||
fb5f0bc8 | 3377 | zilog->zl_replay = B_TRUE; |
428870ff | 3378 | zilog->zl_replay_time = ddi_get_lbolt(); |
34dc7c2f BB |
3379 | ASSERT(zilog->zl_replay_blks == 0); |
3380 | (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, | |
b5256303 | 3381 | zh->zh_claim_txg, B_TRUE); |
00b46022 | 3382 | vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); |
34dc7c2f BB |
3383 | |
3384 | zil_destroy(zilog, B_FALSE); | |
3385 | txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); | |
fb5f0bc8 | 3386 | zilog->zl_replay = B_FALSE; |
34dc7c2f BB |
3387 | } |
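
/*
 * A hedged sketch of the consumer side, derived from the call sites
 * in zil_replay_log_record() above; every name prefixed with "my_"
 * is illustrative, not an actual ZPL or zvol identifier:
 *
 *	static int
 *	my_replay_create(void *arg, char *lr, boolean_t byteswap)
 *	{
 *		... apply the logged TX_CREATE to the objset ...
 *		return (0);
 *	}
 *
 *	static zil_replay_func_t *my_replay_vector[TX_MAX_TYPE] = {
 *		[TX_CREATE] = my_replay_create,
 *		... one entry per log record type replayed ...
 *	};
 *
 *	zil_replay(os, my_private_state, my_replay_vector);
 */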
3388 | ||
428870ff BB |
3389 | boolean_t |
3390 | zil_replaying(zilog_t *zilog, dmu_tx_t *tx) | |
34dc7c2f | 3391 | { |
428870ff BB |
3392 | if (zilog->zl_sync == ZFS_SYNC_DISABLED) |
3393 | return (B_TRUE); | |
34dc7c2f | 3394 | |
428870ff BB |
3395 | if (zilog->zl_replay) { |
3396 | dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); | |
3397 | zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = | |
3398 | zilog->zl_replaying_seq; | |
3399 | return (B_TRUE); | |
34dc7c2f BB |
3400 | } |
3401 | ||
428870ff | 3402 | return (B_FALSE); |
34dc7c2f | 3403 | } |
9babb374 BB |
3404 | |
3405 | /* ARGSUSED */ | |
3406 | int | |
428870ff | 3407 | zil_vdev_offline(const char *osname, void *arg) |
9babb374 | 3408 | { |
9babb374 BB |
3409 | int error; |
3410 | ||
13fe0198 MA |
3411 | error = zil_suspend(osname, NULL); |
3412 | if (error != 0) | |
2e528b49 | 3413 | return (SET_ERROR(EEXIST)); |
13fe0198 | 3414 | return (0); |
9babb374 | 3415 | } |
c409e464 BB |
3416 | |
3417 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
0f699108 AZ |
3418 | EXPORT_SYMBOL(zil_alloc); |
3419 | EXPORT_SYMBOL(zil_free); | |
3420 | EXPORT_SYMBOL(zil_open); | |
3421 | EXPORT_SYMBOL(zil_close); | |
3422 | EXPORT_SYMBOL(zil_replay); | |
3423 | EXPORT_SYMBOL(zil_replaying); | |
3424 | EXPORT_SYMBOL(zil_destroy); | |
3425 | EXPORT_SYMBOL(zil_destroy_sync); | |
3426 | EXPORT_SYMBOL(zil_itx_create); | |
3427 | EXPORT_SYMBOL(zil_itx_destroy); | |
3428 | EXPORT_SYMBOL(zil_itx_assign); | |
3429 | EXPORT_SYMBOL(zil_commit); | |
3430 | EXPORT_SYMBOL(zil_vdev_offline); | |
3431 | EXPORT_SYMBOL(zil_claim); | |
3432 | EXPORT_SYMBOL(zil_check_log_chain); | |
3433 | EXPORT_SYMBOL(zil_sync); | |
3434 | EXPORT_SYMBOL(zil_clean); | |
3435 | EXPORT_SYMBOL(zil_suspend); | |
3436 | EXPORT_SYMBOL(zil_resume); | |
1ce23dca | 3437 | EXPORT_SYMBOL(zil_lwb_add_block); |
0f699108 AZ |
3438 | EXPORT_SYMBOL(zil_bp_tree_add); |
3439 | EXPORT_SYMBOL(zil_set_sync); | |
3440 | EXPORT_SYMBOL(zil_set_logbias); | |
3441 | ||
1b7c1e5c | 3442 | /* BEGIN CSTYLED */ |
2fe61a7e PS |
3443 | module_param(zfs_commit_timeout_pct, int, 0644); |
3444 | MODULE_PARM_DESC(zfs_commit_timeout_pct, "ZIL block open timeout percentage"); | |
3445 | ||
c409e464 BB |
3446 | module_param(zil_replay_disable, int, 0644); |
3447 | MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay"); | |
3448 | ||
3449 | module_param(zfs_nocacheflush, int, 0644); | |
3450 | MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes"); | |
ee191e80 | 3451 | |
1b7c1e5c GDN |
3452 | module_param(zil_slog_bulk, ulong, 0644); |
3453 | MODULE_PARM_DESC(zil_slog_bulk, "Limit in bytes slog sync writes per commit"); | |
3454 | /* END CSTYLED */ | |
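
/*
 * On Linux, these parameters can be inspected and tuned at runtime
 * through the standard module_param sysfs paths, e.g.:
 *
 *	cat /sys/module/zfs/parameters/zfs_commit_timeout_pct
 *	echo 0 > /sys/module/zfs/parameters/zil_replay_disable
 */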
c409e464 | 3455 | #endif |