/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2018 Datto Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */

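/*
 * An illustrative sketch of the chain described above (informational
 * only; see the zil_chain_t definition for the precise on-disk layout):
 *
 *	zil_header_t       ZIL block              ZIL block
 *	+---------+      +--------------+       +--------------+
 *	| zh_log -+----->| ZIL records  |   +-->| ZIL records  |   +--> ...
 *	+---------+      | zc_next_blk -+---+   | zc_next_blk -+---+
 *	                 +--------------+       +--------------+
 *
 * Depending on the block's checksum type, the zil_chain_t that embeds
 * the next block pointer lives at either the start of the block
 * (ZIO_CHECKSUM_ZILOG2) or its end (see zil_read_log_block() below).
 */
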
/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
static int zfs_commit_timeout_pct = 5;

/*
 * See zil.h for more information about these fields.
 */
static zil_stats_t zil_stats = {
	{ "zil_commit_count",			KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_count",			KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes",	KSTAT_DATA_UINT64 },
};

static kstat_t *zil_ksp;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
 * the disk(s) by the ZIL after an LWB write has completed. Setting this
 * will cause ZIL corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
static int zil_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous)
 * priority, to limit potential SLOG device abuse by a single active ZIL
 * writer.
 */
static unsigned long zil_slog_bulk = 768 * 1024;

static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
	sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (likely(cmp))
		return (cmp);

	return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}

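/*
 * The zl_bp_tree AVL tree (ordered by zil_bp_compare() above) records
 * which block pointers have already been visited while parsing the
 * log; it is what makes the claim and free callbacks used with
 * zil_parse() idempotent.
 */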
static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

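/*
 * Record bp's identity DVA in the zl_bp_tree. Returns 0 on first
 * insertion, or EEXIST if the block was seen before; callers use the
 * EEXIST case to skip blocks they have already claimed or freed.
 */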
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

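/*
 * Note on end-of-chain detection: zil_init_log_chain() seeds the
 * block's embedded checksum with a GUID pair and ZIL_ZC_SEQ = 1. Each
 * block's zil_chain_t records the block pointer of the next block,
 * whose checksum must equal this block's checksum with ZIL_ZC_SEQ
 * incremented; zil_read_log_block() below treats any mismatch as the
 * end of the log.
 */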
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, void *dst, char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    &abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	/*
	 * If we are not using the resulting data, we are just checking that
	 * it hasn't been corrupted so we don't need to waste CPU time
	 * decompressing and decrypting it.
	 */
	if (wbuf == NULL)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
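/*
 * An illustrative invocation (this is how zil_claim() below walks the
 * chain to claim its blocks and records):
 *
 *	(void) zil_parse(zilog, zil_claim_log_block,
 *	    zil_claim_log_record, tx, first_txg, B_FALSE);
 *
 * A nonzero return from either callback stops the walk; the error and
 * the per-walk counts are preserved in the zilog's zl_parse_* fields.
 */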
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
    boolean_t decrypt)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	bzero(&next_blk, sizeof (blkptr_t));

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end = NULL;

		if (blk_seq > claim_blk_seq)
			break;

		error = parse_blk_func(zilog, &blk, arg, txg);
		if (error != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
		    lrbuf, &end);
		if (error != 0)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;

			error = parse_lr_func(zilog, lr, arg, txg);
			if (error != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) ||
	    (decrypt && error == EIO));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);

	return (error);
}

static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	(void) tx;
	ASSERT(!BP_IS_HOLE(bp));

	/*
	 * As we call this function from the context of a rewind to a
	 * checkpoint, each ZIL block whose txg is later than the txg
	 * that we rewind to is invalid. Thus, we return -1 so
	 * zil_parse() doesn't attempt to read it.
	 */
	if (bp->blk_birth >= first_txg)
		return (-1);

	if (zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	zio_free(zilog->zl_spa, first_txg, bp);
	return (0);
}

static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	(void) zilog, (void) lrc, (void) tx, (void) first_txg;
	return (0);
}

static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg) {
		error = zil_read_log_data(zilog, lr, NULL);
		if (error != 0)
			return (error);
	}

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t claim_txg)
{
	(void) claim_txg;

	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp))
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

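/*
 * Comparison function for the per-lwb tree of vdevs (lwb_vdev_tree)
 * that must have their write caches flushed; see zil_lwb_add_block()
 * and zil_lwb_write_done() below.
 */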
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	return (TREE_CMP(v1, v2));
}

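/*
 * Allocate and initialize the in-memory state (lwb) tracking the ZIL
 * block at *bp, and link it onto the tail of the zilog's zl_lwb_list.
 */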
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
    boolean_t fastwrite)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_fastwrite = fastwrite;
	lwb->lwb_slog = slog;
	lwb->lwb_state = LWB_STATE_CLOSED;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_tx = NULL;
	lwb->lwb_issued_timestamp = 0;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));

	return (lwb);
}

static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
	ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
	    lwb->lwb_state == LWB_STATE_FLUSH_DONE);

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to cleanup the itxs at the end of spa_sync().
 */
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t fastwrite = FALSE;
	boolean_t slog = FALSE;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
		    ZIL_MIN_BLKSZ, &slog);
		fastwrite = TRUE;

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(error != 0 || bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
	IMPLY(error == 0, lwb != NULL);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			if (lwb->lwb_fastwrite)
				metaslab_fastwrite_unmark(zilog->zl_spa,
				    &lwb->lwb_blk);

			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}

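/*
 * Claim this dataset's intent log so that its blocks are not freed and
 * reused before they can be replayed; tx is the open transaction of
 * the pool's first txg (see the ASSERT on tx_txg below).
 */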
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	zilog_t *zilog;
	uint64_t first_txg;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it cannot have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}

		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
	first_txg = spa_min_claim_txg(zilog->zl_spa);

	/*
	 * If the spa_log_state is not set to be cleared, check whether
	 * the current uberblock is a checkpoint one and if the current
	 * header has been claimed before moving on.
	 *
	 * If the current uberblock is a checkpointed uberblock then
	 * one of the following scenarios took place:
	 *
	 * 1] We are currently rewinding to the checkpoint of the pool.
	 * 2] We crashed in the middle of a checkpoint rewind but we
	 *    did manage to write the checkpointed uberblock to the
	 *    vdev labels, so when we tried to import the pool again
	 *    the checkpointed uberblock was selected from the import
	 *    procedure.
	 *
	 * In both cases we want to zero out all the ZIL blocks, except
	 * the ones that have been claimed at the time of the checkpoint
	 * (their zh_claim_txg != 0). The reason is that these blocks
	 * may be corrupted since we may have reused their locations on
	 * disk after we took the checkpoint.
	 *
	 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
	 * when we first figure out whether the current uberblock is
	 * checkpointed or not. Unfortunately, that would discard all
	 * the logs, including the ones that are claimed, and we would
	 * leak space.
	 */
	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
	    (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
	    zh->zh_claim_txg == 0)) {
		if (!BP_IS_HOLE(&zh->zh_log)) {
			(void) zil_parse(zilog, zil_clear_log_block,
			    zil_noop_log_record, tx, first_txg, B_FALSE);
		}
		BP_ZERO(&zh->zh_log);
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, B_FALSE, FTAG);
		return (0);
	}

	/*
	 * If we are not rewinding and opening the pool normally, then
	 * the min_claim_txg should be equal to the first txg of the pool.
	 */
	ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg, B_FALSE);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, B_FALSE, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	(void) dp;
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		/*
		 * Check the first block and determine if it's on a log device
		 * which may have been removed or faulted prior to loading this
		 * pool. If so, there's no point in checking the rest of the
		 * log as its content should have already been synced to the
		 * pool.
		 */
		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);

		/*
		 * Check whether the current uberblock is checkpointed (e.g.
		 * we are rewinding) and whether the current header has been
		 * claimed or not. If it hasn't then skip verifying it. We
		 * do this because its ZIL blocks may be part of the pool's
		 * state before the rewind, which is no longer valid.
		 */
		zil_header_t *zh = zil_header_in_syncing_context(zilog);
		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
		    zh->zh_claim_txg == 0)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL :
	    spa_min_claim_txg(os->os_spa), B_FALSE);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	/*
	 * The lwb_waiters field of the lwb is protected by the zilog's
	 * zl_lock, thus it must be held when calling this function.
	 */
	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));

	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3P(lwb, !=, NULL);
	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
	    lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_WRITE_DONE);

	list_insert_tail(&lwb->lwb_waiters, zcw);
	zcw->zcw_lwb = lwb;
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
 */
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	list_insert_tail(nolwb, zcw);
	mutex_exit(&zcw->zcw_lock);
}

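/*
 * Record the vdevs written to by "bp" in the lwb's lwb_vdev_tree, so
 * that zil_lwb_write_done() can later flush (or defer flushing) the
 * write caches of exactly those devices.
 */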
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zil_nocacheflush)
		return;

	mutex_enter(&lwb->lwb_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&lwb->lwb_vdev_lock);
}

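/*
 * Hand the older lwb's pending cache flushes over to the younger nlwb
 * by merging its vdev tree into nlwb's; see the flush-deferral logic
 * at the end of zil_lwb_write_done().
 */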
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
	avl_tree_t *src = &lwb->lwb_vdev_tree;
	avl_tree_t *dst = &nlwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);

	/*
	 * At this point in its lifetime, 'lwb' does not need the
	 * protection of lwb_vdev_lock for its lwb_vdev_tree (it will
	 * only be modified while holding zilog->zl_lock), as its writes
	 * and those of its children have all completed. The younger
	 * 'nlwb' may be waiting on future writes to additional vdevs.
	 */
	mutex_enter(&nlwb->lwb_vdev_lock);
	/*
	 * Tear down the 'lwb' vdev tree, ensuring that entries which do not
	 * exist in 'nlwb' are moved to it, freeing any would-be duplicates.
	 */
	while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
		avl_index_t where;

		if (avl_find(dst, zv, &where) == NULL) {
			avl_insert(dst, zv, where);
		} else {
			kmem_free(zv, sizeof (*zv));
		}
	}
	mutex_exit(&nlwb->lwb_vdev_lock);
}

void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}

/*
 * This function is called after all vdevs associated with a given lwb
 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
 * as the lwb write completes, if "zil_nocacheflush" is set. Further,
 * all "previous" lwb's will have completed before this function is
 * called; i.e. this function is called for all previous lwbs before
 * it's called for "this" lwb (enforced via the zio dependencies
 * configured in zil_lwb_set_zio_dependency()).
 *
 * The intention is for this function to be called as soon as the
 * contents of an lwb are considered "stable" on disk, and will survive
 * any sudden loss of power. At this point, any threads waiting for the
 * lwb to reach this state are signalled, and the "waiter" structures
 * are marked "done".
 */
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;
	zil_commit_waiter_t *zcw;
	itx_t *itx;

	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);

	mutex_enter(&zilog->zl_lock);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing the
	 * txg. If we have had an allocation failure and the txg is
	 * waiting to sync then we want zil_sync() to remove the lwb so
	 * that it's not picked up as the next new one in
	 * zil_process_commit_list(). zil_sync() will only remove the
	 * lwb if lwb_buf is null.
	 */
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;

	ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
	zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;

	lwb->lwb_root_zio = NULL;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	lwb->lwb_state = LWB_STATE_FLUSH_DONE;

	if (zilog->zl_last_lwb_opened == lwb) {
		/*
		 * Remember the highest committed log sequence number
		 * for ztest. We only update this value when all the log
		 * writes succeeded, because ztest wants to ASSERT that
		 * it got the whole log chain.
		 */
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
	}

	while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
		list_remove(&lwb->lwb_itxs, itx);
		zil_itx_destroy(itx);
	}

	while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
		mutex_enter(&zcw->zcw_lock);

		ASSERT(list_link_active(&zcw->zcw_node));
		list_remove(&lwb->lwb_waiters, zcw);

		ASSERT3P(zcw->zcw_lwb, ==, lwb);
		zcw->zcw_lwb = NULL;
		/*
		 * We expect any ZIO errors from child ZIOs to have been
		 * propagated "up" to this specific LWB's root ZIO, in
		 * order for this error handling to work correctly. This
		 * includes ZIO errors from either this LWB's write or
		 * flush, as well as any errors from other dependent LWBs
		 * (e.g. a root LWB ZIO that might be a child of this LWB).
		 *
		 * With that said, it's important to note that LWB flush
		 * errors are not propagated up to the LWB root ZIO.
		 * This is incorrect behavior, and results in VDEV flush
		 * errors not being handled correctly here. See the
		 * comment above the call to "zio_flush" for details.
		 */

		zcw->zcw_zio_error = zio->io_error;

		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
		zcw->zcw_done = B_TRUE;
		cv_broadcast(&zcw->zcw_cv);

		mutex_exit(&zcw->zcw_lock);
	}

	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}

/*
 * This is called when an lwb's write zio completes. The callback's
 * purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
 * in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
 * in writing out this specific lwb's data, and in the case that cache
 * flushes have been deferred, vdevs involved in writing the data for
 * previous lwbs. The writes corresponding to all the vdevs in the
 * lwb_vdev_tree will have completed by the time this is called, due to
 * the zio dependencies configured in zil_lwb_set_zio_dependency(),
 * which takes deferred flushes into account. The lwb will be "done"
 * once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
 * completion callback for the lwb's root zio.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	spa_t *spa = zio->io_spa;
	zilog_t *zilog = lwb->lwb_zilog;
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	lwb_t *nlwb;

	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(BP_GET_FILL(zio->io_bp) == 0);

	abd_free(zio->io_abd);

	mutex_enter(&zilog->zl_lock);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
	lwb->lwb_state = LWB_STATE_WRITE_DONE;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_fastwrite = FALSE;
	nlwb = list_next(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	if (avl_numnodes(t) == 0)
		return;

	/*
	 * If there was an IO error, we're not going to call zio_flush()
	 * on these vdevs, so we simply empty the tree and free the
	 * nodes. We avoid calling zio_flush() since there isn't any
	 * good reason for doing so, after the lwb block failed to be
	 * written out.
	 *
	 * Additionally, we don't perform any further error handling at
	 * this point (e.g. setting "zcw_zio_error" appropriately), as
	 * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
	 * we expect any error seen here, to have been propagated to
	 * that function).
	 */
	if (zio->io_error != 0) {
		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(zv, sizeof (*zv));
		return;
	}

	/*
	 * If this lwb does not have any threads waiting for it to
	 * complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
	 * command to the vdevs written to by "this" lwb, and instead
	 * rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
	 * command for those vdevs. Thus, we merge the vdev tree of
	 * "this" lwb with the vdev tree of the "next" lwb in the list,
	 * and assume the "next" lwb will handle flushing the vdevs (or
	 * deferring the flush(es) again).
	 *
	 * This is a useful performance optimization, especially for
	 * workloads with lots of async write activity and few sync
	 * write and/or fsync activity, as it has the potential to
	 * coalesce multiple flush commands to a vdev into one.
	 */
	if (list_head(&lwb->lwb_waiters) == NULL && nlwb != NULL) {
		zil_lwb_flush_defer(lwb, nlwb);
		ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
		return;
	}

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL) {
			/*
			 * The "ZIO_FLAG_DONT_PROPAGATE" is currently
			 * always used within "zio_flush". This means,
			 * any errors when flushing the vdev(s), will
			 * (unfortunately) not be handled correctly,
			 * since these "zio_flush" errors will not be
			 * propagated up to "zil_lwb_flush_vdevs_done".
			 */
			zio_flush(lwb->lwb_root_zio, vd);
		}
		kmem_free(zv, sizeof (*zv));
	}
}

static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT(MUTEX_HELD(&zilog->zl_lock));

	/*
	 * The zilog's "zl_last_lwb_opened" field is used to build the
	 * lwb/zio dependency chain, which is used to preserve the
	 * ordering of lwb completions that is required by the semantics
	 * of the ZIL. Each new lwb zio becomes a parent of the
	 * "previous" lwb zio, such that the new lwb's zio cannot
	 * complete until the "previous" lwb's zio completes.
	 *
	 * This is required by the semantics of zil_commit(); the commit
	 * waiters attached to the lwbs will be woken in the lwb zio's
	 * completion callback, so this zio dependency graph ensures the
	 * waiters are woken in the correct order (the same order the
	 * lwbs were created).
	 */
	if (last_lwb_opened != NULL &&
	    last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
		ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
		    last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
		    last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);

		ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
		zio_add_child(lwb->lwb_root_zio,
		    last_lwb_opened->lwb_root_zio);

		/*
		 * If the previous lwb's write hasn't already completed,
		 * we also want to order the completion of the lwb write
		 * zios (above, we only order the completion of the lwb
		 * root zios). This is required because of how we can
		 * defer the DKIOCFLUSHWRITECACHE commands for each lwb.
		 *
		 * When the DKIOCFLUSHWRITECACHE commands are deferred,
		 * the previous lwb will rely on this lwb to flush the
		 * vdevs written to by that previous lwb. Thus, we need
		 * to ensure this lwb doesn't issue the flush until
		 * after the previous lwb's write completes. We ensure
		 * this ordering by setting the zio parent/child
		 * relationship here.
		 *
		 * Without this relationship on the lwb's write zio,
		 * it's possible for this lwb's write to complete prior
		 * to the previous lwb's write completing; and thus, the
		 * vdevs for the previous lwb would be flushed prior to
		 * that lwb's data being written to those vdevs (the
		 * vdevs are flushed in the lwb write zio's completion
		 * handler, zil_lwb_write_done()).
		 */
		if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
			ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
			    last_lwb_opened->lwb_state == LWB_STATE_ISSUED);

			ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
			zio_add_child(lwb->lwb_write_zio,
			    last_lwb_opened->lwb_write_zio);
		}
	}
}

34dc7c2f | 1387 | /* |
1ce23dca PS |
1388 | * This function's purpose is to "open" an lwb such that it is ready to |
1389 | * accept new itxs being committed to it. To do this, the lwb's zio | |
1390 | * structures are created and linked to the lwb. This function is | |
1391 | * idempotent; if the passed-in lwb has already been opened, this | |
1392 | * function is essentially a no-op. | |
34dc7c2f BB |
1393 | */ |
1394 | static void | |
1ce23dca | 1395 | zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) |
34dc7c2f | 1396 | { |
5dbd68a3 | 1397 | zbookmark_phys_t zb; |
1b7c1e5c | 1398 | zio_priority_t prio; |
34dc7c2f | 1399 | |
1b2b0aca | 1400 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
1401 | ASSERT3P(lwb, !=, NULL); |
1402 | EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED); | |
1403 | EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED); | |
1404 | ||
428870ff BB |
1405 | SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], |
1406 | ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, | |
1407 | lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); | |
34dc7c2f | 1408 | |
920dd524 ED |
1409 | /* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */ |
1410 | mutex_enter(&zilog->zl_lock); | |
1ce23dca | 1411 | if (lwb->lwb_root_zio == NULL) { |
a6255b7f DQ |
1412 | abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, |
1413 | BP_GET_LSIZE(&lwb->lwb_blk)); | |
1ce23dca | 1414 | |
920dd524 ED |
1415 | if (!lwb->lwb_fastwrite) { |
1416 | metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk); | |
1417 | lwb->lwb_fastwrite = 1; | |
1418 | } | |
1ce23dca | 1419 | |
1b7c1e5c GDN |
1420 | if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk) |
1421 | prio = ZIO_PRIORITY_SYNC_WRITE; | |
1422 | else | |
1423 | prio = ZIO_PRIORITY_ASYNC_WRITE; | |
1ce23dca PS |
1424 | |
1425 | lwb->lwb_root_zio = zio_root(zilog->zl_spa, | |
1426 | zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL); | |
1427 | ASSERT3P(lwb->lwb_root_zio, !=, NULL); | |
1428 | ||
1429 | lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, | |
1430 | zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd, | |
1431 | BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb, | |
f82f0279 | 1432 | prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb); |
1ce23dca PS |
1433 | ASSERT3P(lwb->lwb_write_zio, !=, NULL); |
1434 | ||
1435 | lwb->lwb_state = LWB_STATE_OPENED; | |
1436 | ||
900d09b2 | 1437 | zil_lwb_set_zio_dependency(zilog, lwb); |
1ce23dca | 1438 | zilog->zl_last_lwb_opened = lwb; |
34dc7c2f | 1439 | } |
920dd524 | 1440 | mutex_exit(&zilog->zl_lock); |
1ce23dca PS |
1441 | |
1442 | ASSERT3P(lwb->lwb_root_zio, !=, NULL); | |
1443 | ASSERT3P(lwb->lwb_write_zio, !=, NULL); | |
1444 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); | |
34dc7c2f BB |
1445 | } |
1446 | ||
428870ff BB |
1447 | /* |
1448 | * Define a limited set of intent log block sizes. | |
d3cc8b15 | 1449 | * |
428870ff BB |
1450 | * These must be a multiple of 4KB. Note that only the amount used (again | |
1451 | * aligned to 4KB) actually gets written. However, we can't always just | |
f1512ee6 | 1452 | * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. |
428870ff | 1453 | */ |
18168da7 | 1454 | static const struct { |
f15d6a54 AM |
1455 | uint64_t limit; |
1456 | uint64_t blksz; | |
1457 | } zil_block_buckets[] = { | |
1458 | { 4096, 4096 }, /* non TX_WRITE */ | |
1459 | { 8192 + 4096, 8192 + 4096 }, /* database */ | |
1460 | { 32768 + 4096, 32768 + 4096 }, /* NFS writes */ | |
1461 | { 65536 + 4096, 65536 + 4096 }, /* 64KB writes */ | |
1462 | { 131072, 131072 }, /* < 128KB writes */ | |
1463 | { 131072 + 4096, 65536 + 4096 }, /* 128KB writes */ | |
1464 | { UINT64_MAX, SPA_OLD_MAXBLOCKSIZE }, /* > 128KB writes */ | |
428870ff BB |
1465 | }; |
1466 | ||
b8738257 MA |
1467 | /* |
1468 | * Maximum block size used by the ZIL. Its value is captured when the | |
1469 | * ZIL is initialized; after that, consult zl_max_block_size rather | |
1470 | * than reading this tunable directly. | |
1471 | */ | |
18168da7 | 1472 | static int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; |
b8738257 | 1473 | |
34dc7c2f BB |
1474 | /* |
1475 | * Start a log block write and advance to the next log block. | |
1476 | * Calls are serialized. | |
1477 | */ | |
1478 | static lwb_t * | |
1ce23dca | 1479 | zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) |
34dc7c2f | 1480 | { |
428870ff BB |
1481 | lwb_t *nlwb = NULL; |
1482 | zil_chain_t *zilc; | |
34dc7c2f | 1483 | spa_t *spa = zilog->zl_spa; |
428870ff BB |
1484 | blkptr_t *bp; |
1485 | dmu_tx_t *tx; | |
34dc7c2f | 1486 | uint64_t txg; |
428870ff BB |
1487 | uint64_t zil_blksz, wsz; |
1488 | int i, error; | |
1b7c1e5c | 1489 | boolean_t slog; |
428870ff | 1490 | |
1b2b0aca | 1491 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
1492 | ASSERT3P(lwb->lwb_root_zio, !=, NULL); |
1493 | ASSERT3P(lwb->lwb_write_zio, !=, NULL); | |
1494 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); | |
1495 | ||
428870ff BB |
1496 | if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { |
1497 | zilc = (zil_chain_t *)lwb->lwb_buf; | |
1498 | bp = &zilc->zc_next_blk; | |
1499 | } else { | |
1500 | zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); | |
1501 | bp = &zilc->zc_next_blk; | |
1502 | } | |
34dc7c2f | 1503 | |
428870ff | 1504 | ASSERT(lwb->lwb_nused <= lwb->lwb_sz); |
34dc7c2f BB |
1505 | |
1506 | /* | |
1507 | * Allocate the next block and save its address in this block | |
1508 | * before writing it in order to establish the log chain. | |
1509 | * Note that if the allocation of nlwb synced before we wrote | |
1510 | * the block that points at it (lwb), we'd leak it if we crashed. | |
428870ff BB |
1511 | * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done(). |
1512 | * We dirty the dataset to ensure that zil_sync() will be called | |
1513 | * to clean up in the event of allocation failure or I/O failure. | |
34dc7c2f | 1514 | */ |
1ce23dca | 1515 | |
428870ff | 1516 | tx = dmu_tx_create(zilog->zl_os); |
e98b6117 AG |
1517 | |
1518 | /* | |
0735ecb3 PS |
1519 | * Since we are not going to create any new dirty data, and we |
1520 | * can even help with clearing the existing dirty data, we | |
1521 | * should not be subject to the dirty-data-based delays. We | |
1522 | * use TXG_NOTHROTTLE to bypass the delay mechanism. | |
e98b6117 | 1523 | */ |
0735ecb3 PS |
1524 | VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE)); |
1525 | ||
428870ff BB |
1526 | dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); |
1527 | txg = dmu_tx_get_txg(tx); | |
1528 | ||
1529 | lwb->lwb_tx = tx; | |
34dc7c2f BB |
1530 | |
1531 | /* | |
428870ff BB |
1532 | * Log blocks are pre-allocated. Here we select the size of the next |
1533 | * block, based on the size used in the last block. | |
1534 | * - first find the smallest bucket that will fit the block from a | |
1535 | * limited set of block sizes. This is because it's faster to write | |
1536 | * blocks allocated from the same metaslab as they are adjacent or | |
1537 | * close. | |
1538 | * - next find the maximum from the new suggested size and an array of | |
1539 | * previous sizes. This lessens a picket-fence effect of wrongly | |
2fe61a7e | 1540 | * guessing the size if we have a stream of, say, 2k, 64k, 2k, 64k | |
428870ff BB |
1541 | * requests. |
1542 | * | |
1543 | * Note that we only write what is used, but we can't just allocate | |
1544 | * the maximum block size because we can exhaust the available | |
1545 | * pool log space. | |
34dc7c2f | 1546 | */ |
428870ff | 1547 | zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); |
f15d6a54 | 1548 | for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++) |
428870ff | 1549 | continue; |
f15d6a54 | 1550 | zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size); |
428870ff BB |
1551 | zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; |
1552 | for (i = 0; i < ZIL_PREV_BLKS; i++) | |
1553 | zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); | |
1554 | zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); | |
34dc7c2f BB |
1555 | |
1556 | BP_ZERO(bp); | |
b5256303 | 1557 | error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog); |
1b7c1e5c | 1558 | if (slog) { |
b6ad9671 ED |
1559 | ZIL_STAT_BUMP(zil_itx_metaslab_slog_count); |
1560 | ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused); | |
d1d7e268 | 1561 | } else { |
b6ad9671 ED |
1562 | ZIL_STAT_BUMP(zil_itx_metaslab_normal_count); |
1563 | ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused); | |
1564 | } | |
13fe0198 | 1565 | if (error == 0) { |
428870ff BB |
1566 | ASSERT3U(bp->blk_birth, ==, txg); |
1567 | bp->blk_cksum = lwb->lwb_blk.blk_cksum; | |
1568 | bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; | |
34dc7c2f BB |
1569 | |
1570 | /* | |
1ce23dca | 1571 | * Allocate a new log write block (lwb). |
34dc7c2f | 1572 | */ |
1b7c1e5c | 1573 | nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE); |
34dc7c2f BB |
1574 | } |
1575 | ||
428870ff BB |
1576 | if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { |
1577 | /* For Slim ZIL only write what is used. */ | |
1578 | wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t); | |
1579 | ASSERT3U(wsz, <=, lwb->lwb_sz); | |
1ce23dca | 1580 | zio_shrink(lwb->lwb_write_zio, wsz); |
34dc7c2f | 1581 | |
428870ff BB |
1582 | } else { |
1583 | wsz = lwb->lwb_sz; | |
1584 | } | |
34dc7c2f | 1585 | |
428870ff BB |
1586 | zilc->zc_pad = 0; |
1587 | zilc->zc_nused = lwb->lwb_nused; | |
1588 | zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; | |
34dc7c2f BB |
1589 | |
1590 | /* | |
428870ff | 1591 | * clear unused data for security |
34dc7c2f | 1592 | */ |
428870ff | 1593 | bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused); |
34dc7c2f | 1594 | |
1ce23dca PS |
1595 | spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER); |
1596 | ||
1597 | zil_lwb_add_block(lwb, &lwb->lwb_blk); | |
1598 | lwb->lwb_issued_timestamp = gethrtime(); | |
1599 | lwb->lwb_state = LWB_STATE_ISSUED; | |
1600 | ||
1601 | zio_nowait(lwb->lwb_root_zio); | |
1602 | zio_nowait(lwb->lwb_write_zio); | |
34dc7c2f BB |
1603 | |
1604 | /* | |
428870ff BB |
1605 | * If there was an allocation failure, then nlwb will be NULL, which | |
1606 | * forces a txg_wait_synced(). | |
34dc7c2f | 1607 | */ |
34dc7c2f BB |
1608 | return (nlwb); |
1609 | } | |
1610 | ||
b8738257 MA |
1611 | /* |
1612 | * Maximum amount of write data that can be put into a single log block. | |
1613 | */ | |
1614 | uint64_t | |
1615 | zil_max_log_data(zilog_t *zilog) | |
1616 | { | |
1617 | return (zilog->zl_max_block_size - | |
1618 | sizeof (zil_chain_t) - sizeof (lr_write_t)); | |
1619 | } | |
1620 | ||
1621 | /* | |
1622 | * Maximum amount of log space we agree to waste, in order to reduce the | |
1623 | * number of WR_NEED_COPY chunks and thus the zl_get_data() overhead (~12%). | |
1624 | */ | |
1625 | static inline uint64_t | |
1626 | zil_max_waste_space(zilog_t *zilog) | |
1627 | { | |
1628 | return (zil_max_log_data(zilog) / 8); | |
1629 | } | |
1630 | ||
1631 | /* | |
1632 | * Maximum amount of write data for WR_COPIED. For correctness, consumers | |
1633 | * must fall back to WR_NEED_COPY if we can't fit the entire record into one | |
1634 | * maximum sized log block, because each WR_COPIED record must fit in a | |
1635 | * single log block. For space efficiency, we want to fit two records into a | |
1636 | * max-sized log block. | |
1637 | */ | |
1638 | uint64_t | |
1639 | zil_max_copied_data(zilog_t *zilog) | |
1640 | { | |
1641 | return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 - | |
1642 | sizeof (lr_write_t)); | |
1643 | } | |
1644 | ||
34dc7c2f BB |
1645 | static lwb_t * |
1646 | zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) | |
1647 | { | |
1b7c1e5c GDN |
1648 | lr_t *lrcb, *lrc; |
1649 | lr_write_t *lrwb, *lrw; | |
428870ff | 1650 | char *lr_buf; |
3a185275 | 1651 | uint64_t dlen, dnow, dpad, lwb_sp, reclen, txg, max_log_data; |
34dc7c2f | 1652 | |
1b2b0aca | 1653 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
1654 | ASSERT3P(lwb, !=, NULL); |
1655 | ASSERT3P(lwb->lwb_buf, !=, NULL); | |
1656 | ||
1657 | zil_lwb_write_open(zilog, lwb); | |
428870ff | 1658 | |
1ce23dca PS |
1659 | lrc = &itx->itx_lr; |
1660 | lrw = (lr_write_t *)lrc; | |
1661 | ||
1662 | /* | |
1663 | * A commit itx doesn't represent any on-disk state; instead | |
1664 | * it's simply used as a placeholder on the commit list, and | |
1665 | * provides a mechanism for attaching a "commit waiter" onto the | |
1666 | * correct lwb (such that the waiter can be signalled upon | |
1667 | * completion of that lwb). Thus, we don't process this itx's | |
1668 | * log record if it's a commit itx (these itx's don't have log | |
1669 | * records), and instead link the itx's waiter onto the lwb's | |
1670 | * list of waiters. | |
1671 | * | |
1672 | * For more details, see the comment above zil_commit(). | |
1673 | */ | |
1674 | if (lrc->lrc_txtype == TX_COMMIT) { | |
2fe61a7e | 1675 | mutex_enter(&zilog->zl_lock); |
1ce23dca PS |
1676 | zil_commit_waiter_link_lwb(itx->itx_private, lwb); |
1677 | itx->itx_private = NULL; | |
2fe61a7e | 1678 | mutex_exit(&zilog->zl_lock); |
1ce23dca PS |
1679 | return (lwb); |
1680 | } | |
34dc7c2f | 1681 | |
1b7c1e5c | 1682 | if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { |
34dc7c2f | 1683 | dlen = P2ROUNDUP_TYPED( |
428870ff | 1684 | lrw->lr_length, sizeof (uint64_t), uint64_t); |
3a185275 | 1685 | dpad = dlen - lrw->lr_length; |
1b7c1e5c | 1686 | } else { |
3a185275 | 1687 | dlen = dpad = 0; |
1b7c1e5c GDN |
1688 | } |
1689 | reclen = lrc->lrc_reclen; | |
34dc7c2f | 1690 | zilog->zl_cur_used += (reclen + dlen); |
1b7c1e5c | 1691 | txg = lrc->lrc_txg; |
34dc7c2f | 1692 | |
1ce23dca | 1693 | ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen)); |
34dc7c2f | 1694 | |
1b7c1e5c | 1695 | cont: |
34dc7c2f BB |
1696 | /* |
1697 | * If this record won't fit in the current log block, start a new one. | |
1b7c1e5c | 1698 | * For WR_NEED_COPY, optimize the layout for a minimal number of chunks. | |
34dc7c2f | 1699 | */ |
1b7c1e5c | 1700 | lwb_sp = lwb->lwb_sz - lwb->lwb_nused; |
b8738257 | 1701 | max_log_data = zil_max_log_data(zilog); |
1b7c1e5c | 1702 | if (reclen > lwb_sp || (reclen + dlen > lwb_sp && |
b8738257 MA |
1703 | lwb_sp < zil_max_waste_space(zilog) && |
1704 | (dlen % max_log_data == 0 || | |
1705 | lwb_sp < reclen + dlen % max_log_data))) { | |
1ce23dca | 1706 | lwb = zil_lwb_write_issue(zilog, lwb); |
34dc7c2f BB |
1707 | if (lwb == NULL) |
1708 | return (NULL); | |
1ce23dca | 1709 | zil_lwb_write_open(zilog, lwb); |
428870ff | 1710 | ASSERT(LWB_EMPTY(lwb)); |
1b7c1e5c | 1711 | lwb_sp = lwb->lwb_sz - lwb->lwb_nused; |
b8738257 MA |
1712 | |
1713 | /* | |
1714 | * There must be enough space in the new, empty log block to | |
1715 | * hold reclen. For WR_COPIED, we need to fit the whole | |
1716 | * record in one block, and reclen is the header size + the | |
1717 | * data size. For WR_NEED_COPY, we can create multiple | |
1718 | * records, splitting the data into multiple blocks, so we | |
1719 | * only need to fit one word of data per block; in this case | |
1720 | * reclen is just the header size (no data). | |
1721 | */ | |
1b7c1e5c | 1722 | ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); |
34dc7c2f BB |
1723 | } |
1724 | ||
1b7c1e5c | 1725 | dnow = MIN(dlen, lwb_sp - reclen); |
428870ff BB |
1726 | lr_buf = lwb->lwb_buf + lwb->lwb_nused; |
1727 | bcopy(lrc, lr_buf, reclen); | |
1b7c1e5c GDN |
1728 | lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */ |
1729 | lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */ | |
34dc7c2f | 1730 | |
b6ad9671 ED |
1731 | ZIL_STAT_BUMP(zil_itx_count); |
1732 | ||
34dc7c2f BB |
1733 | /* |
1734 | * If it's a write, fetch the data or get its blkptr as appropriate. | |
1735 | */ | |
1736 | if (lrc->lrc_txtype == TX_WRITE) { | |
1737 | if (txg > spa_freeze_txg(zilog->zl_spa)) | |
1738 | txg_wait_synced(zilog->zl_dmu_pool, txg); | |
b6ad9671 ED |
1739 | if (itx->itx_wr_state == WR_COPIED) { |
1740 | ZIL_STAT_BUMP(zil_itx_copied_count); | |
1741 | ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length); | |
1742 | } else { | |
34dc7c2f BB |
1743 | char *dbuf; |
1744 | int error; | |
1745 | ||
1b7c1e5c | 1746 | if (itx->itx_wr_state == WR_NEED_COPY) { |
428870ff | 1747 | dbuf = lr_buf + reclen; |
1b7c1e5c GDN |
1748 | lrcb->lrc_reclen += dnow; |
1749 | if (lrwb->lr_length > dnow) | |
1750 | lrwb->lr_length = dnow; | |
1751 | lrw->lr_offset += dnow; | |
1752 | lrw->lr_length -= dnow; | |
b6ad9671 | 1753 | ZIL_STAT_BUMP(zil_itx_needcopy_count); |
5666a994 | 1754 | ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow); |
34dc7c2f | 1755 | } else { |
1ce23dca | 1756 | ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT); |
34dc7c2f | 1757 | dbuf = NULL; |
b6ad9671 | 1758 | ZIL_STAT_BUMP(zil_itx_indirect_count); |
d1d7e268 MK |
1759 | ZIL_STAT_INCR(zil_itx_indirect_bytes, |
1760 | lrw->lr_length); | |
34dc7c2f | 1761 | } |
1ce23dca PS |
1762 | |
1763 | /* | |
1764 | * We pass in the "lwb_write_zio" rather than | |
1765 | * "lwb_root_zio" so that the "lwb_write_zio" | |
1766 | * becomes the parent of any zio's created by | |
1767 | * the "zl_get_data" callback. The vdevs are | |
1768 | * flushed after the "lwb_write_zio" completes, | |
1769 | * so we want to make sure that completion | |
1770 | * callback waits for these additional zio's, | |
1771 | * such that the vdevs used by those zio's will | |
1772 | * be included in the lwb's vdev tree, and those | |
1773 | * vdevs will be properly flushed. If we passed | |
1774 | * in "lwb_root_zio" here, then these additional | |
1775 | * vdevs may not be flushed; e.g. if these zio's | |
1776 | * completed after "lwb_write_zio" completed. | |
1777 | */ | |
1778 | error = zilog->zl_get_data(itx->itx_private, | |
296a4a36 CC |
1779 | itx->itx_gen, lrwb, dbuf, lwb, |
1780 | lwb->lwb_write_zio); | |
3a185275 MJ |
1781 | if (dbuf != NULL && error == 0 && dnow == dlen) |
1782 | /* Zero any padding bytes in the last block. */ | |
1783 | bzero((char *)dbuf + lrwb->lr_length, dpad); | |
1ce23dca | 1784 | |
45d1cae3 BB |
1785 | if (error == EIO) { |
1786 | txg_wait_synced(zilog->zl_dmu_pool, txg); | |
1787 | return (lwb); | |
1788 | } | |
13fe0198 | 1789 | if (error != 0) { |
34dc7c2f BB |
1790 | ASSERT(error == ENOENT || error == EEXIST || |
1791 | error == EALREADY); | |
1792 | return (lwb); | |
1793 | } | |
1794 | } | |
1795 | } | |
1796 | ||
428870ff BB |
1797 | /* |
1798 | * We're actually making an entry, so update lrc_seq to be the | |
1799 | * log record sequence number. Note that this is generally not | |
1800 | * equal to the itx sequence number because not all transactions | |
1801 | * are synchronous, and sometimes spa_sync() gets there first. | |
1802 | */ | |
1ce23dca | 1803 | lrcb->lrc_seq = ++zilog->zl_lr_seq; |
1b7c1e5c | 1804 | lwb->lwb_nused += reclen + dnow; |
1ce23dca PS |
1805 | |
1806 | zil_lwb_add_txg(lwb, txg); | |
1807 | ||
428870ff | 1808 | ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); |
c99c9001 | 1809 | ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); |
34dc7c2f | 1810 | |
1b7c1e5c GDN |
1811 | dlen -= dnow; |
1812 | if (dlen > 0) { | |
1813 | zilog->zl_cur_used += reclen; | |
1814 | goto cont; | |
1815 | } | |
1816 | ||
34dc7c2f BB |
1817 | return (lwb); |
1818 | } | |
1819 | ||
1820 | itx_t * | |
58714c28 | 1821 | zil_itx_create(uint64_t txtype, size_t olrsize) |
34dc7c2f | 1822 | { |
58714c28 | 1823 | size_t itxsize, lrsize; |
34dc7c2f BB |
1824 | itx_t *itx; |
1825 | ||
58714c28 | 1826 | lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t); |
72841b9f | 1827 | itxsize = offsetof(itx_t, itx_lr) + lrsize; |
34dc7c2f | 1828 | |
72841b9f | 1829 | itx = zio_data_buf_alloc(itxsize); |
34dc7c2f BB |
1830 | itx->itx_lr.lrc_txtype = txtype; |
1831 | itx->itx_lr.lrc_reclen = lrsize; | |
34dc7c2f | 1832 | itx->itx_lr.lrc_seq = 0; /* defensive */ |
58714c28 | 1833 | bzero((char *)&itx->itx_lr + olrsize, lrsize - olrsize); |
572e2857 | 1834 | itx->itx_sync = B_TRUE; /* default is synchronous */ |
119a394a ED |
1835 | itx->itx_callback = NULL; |
1836 | itx->itx_callback_data = NULL; | |
72841b9f | 1837 | itx->itx_size = itxsize; |
34dc7c2f BB |
1838 | |
1839 | return (itx); | |
1840 | } | |
1841 | ||
428870ff BB |
1842 | void |
1843 | zil_itx_destroy(itx_t *itx) | |
1844 | { | |
1ce23dca PS |
1845 | IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); |
1846 | IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); | |
1847 | ||
1848 | if (itx->itx_callback != NULL) | |
1849 | itx->itx_callback(itx->itx_callback_data); | |
1850 | ||
72841b9f | 1851 | zio_data_buf_free(itx, itx->itx_size); |
428870ff BB |
1852 | } |
1853 | ||
572e2857 BB |
1854 | /* |
1855 | * Free up the sync and async itxs. The itxs_t has already been detached | |
1856 | * so no locks are needed. | |
1857 | */ | |
1858 | static void | |
23c13c7e | 1859 | zil_itxg_clean(void *arg) |
34dc7c2f | 1860 | { |
572e2857 BB |
1861 | itx_t *itx; |
1862 | list_t *list; | |
1863 | avl_tree_t *t; | |
1864 | void *cookie; | |
23c13c7e | 1865 | itxs_t *itxs = arg; |
572e2857 BB |
1866 | itx_async_node_t *ian; |
1867 | ||
1868 | list = &itxs->i_sync_list; | |
1869 | while ((itx = list_head(list)) != NULL) { | |
1ce23dca PS |
1870 | /* |
1871 | * In the general case, commit itxs will not be found | |
1872 | * here, as they'll be committed to an lwb via | |
1873 | * zil_lwb_commit(), and free'd in that function. Having | |
1874 | * said that, it is still possible for commit itxs to be | |
1875 | * found here, due to the following race: | |
1876 | * | |
1877 | * - a thread calls zil_commit() which assigns the | |
1878 | * commit itx to a per-txg i_sync_list | |
1879 | * - zil_itxg_clean() is called (e.g. via spa_sync()) | |
1880 | * while the waiter is still on the i_sync_list | |
1881 | * | |
1882 | * There's nothing to prevent syncing the txg while the | |
1883 | * waiter is on the i_sync_list. This normally doesn't | |
1884 | * happen because spa_sync() is slower than zil_commit(), | |
1885 | * but if zil_commit() calls txg_wait_synced() (e.g. | |
1886 | * because zil_create() or zil_commit_writer_stall() is | |
1887 | * called) we will hit this case. | |
1888 | */ | |
1889 | if (itx->itx_lr.lrc_txtype == TX_COMMIT) | |
1890 | zil_commit_waiter_skip(itx->itx_private); | |
1891 | ||
572e2857 | 1892 | list_remove(list, itx); |
19ea3d25 | 1893 | zil_itx_destroy(itx); |
572e2857 | 1894 | } |
34dc7c2f | 1895 | |
572e2857 BB |
1896 | cookie = NULL; |
1897 | t = &itxs->i_async_tree; | |
1898 | while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { | |
1899 | list = &ian->ia_list; | |
1900 | while ((itx = list_head(list)) != NULL) { | |
1901 | list_remove(list, itx); | |
1ce23dca PS |
1902 | /* commit itxs should never be on the async lists. */ |
1903 | ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); | |
19ea3d25 | 1904 | zil_itx_destroy(itx); |
572e2857 BB |
1905 | } |
1906 | list_destroy(list); | |
1907 | kmem_free(ian, sizeof (itx_async_node_t)); | |
1908 | } | |
1909 | avl_destroy(t); | |
34dc7c2f | 1910 | |
572e2857 BB |
1911 | kmem_free(itxs, sizeof (itxs_t)); |
1912 | } | |
34dc7c2f | 1913 | |
572e2857 BB |
1914 | static int |
1915 | zil_aitx_compare(const void *x1, const void *x2) | |
1916 | { | |
1917 | const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; | |
1918 | const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; | |
1919 | ||
ca577779 | 1920 | return (TREE_CMP(o1, o2)); |
34dc7c2f BB |
1921 | } |
1922 | ||
1923 | /* | |
572e2857 | 1924 | * Remove all async itx with the given oid. |
34dc7c2f | 1925 | */ |
8e556c5e | 1926 | void |
572e2857 | 1927 | zil_remove_async(zilog_t *zilog, uint64_t oid) |
34dc7c2f | 1928 | { |
572e2857 BB |
1929 | uint64_t otxg, txg; |
1930 | itx_async_node_t *ian; | |
1931 | avl_tree_t *t; | |
1932 | avl_index_t where; | |
34dc7c2f BB |
1933 | list_t clean_list; |
1934 | itx_t *itx; | |
1935 | ||
572e2857 | 1936 | ASSERT(oid != 0); |
34dc7c2f BB |
1937 | list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); |
1938 | ||
572e2857 BB |
1939 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ |
1940 | otxg = ZILTEST_TXG; | |
1941 | else | |
1942 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
34dc7c2f | 1943 | |
572e2857 BB |
1944 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
1945 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
1946 | ||
1947 | mutex_enter(&itxg->itxg_lock); | |
1948 | if (itxg->itxg_txg != txg) { | |
1949 | mutex_exit(&itxg->itxg_lock); | |
1950 | continue; | |
1951 | } | |
34dc7c2f | 1952 | |
572e2857 BB |
1953 | /* |
1954 | * Locate the object node and append its list. | |
1955 | */ | |
1956 | t = &itxg->itxg_itxs->i_async_tree; | |
1957 | ian = avl_find(t, &oid, &where); | |
1958 | if (ian != NULL) | |
1959 | list_move_tail(&clean_list, &ian->ia_list); | |
1960 | mutex_exit(&itxg->itxg_lock); | |
1961 | } | |
34dc7c2f BB |
1962 | while ((itx = list_head(&clean_list)) != NULL) { |
1963 | list_remove(&clean_list, itx); | |
1ce23dca PS |
1964 | /* commit itxs should never be on the async lists. */ |
1965 | ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); | |
19ea3d25 | 1966 | zil_itx_destroy(itx); |
34dc7c2f BB |
1967 | } |
1968 | list_destroy(&clean_list); | |
1969 | } | |
1970 | ||
572e2857 BB |
1971 | void |
1972 | zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) | |
1973 | { | |
1974 | uint64_t txg; | |
1975 | itxg_t *itxg; | |
1976 | itxs_t *itxs, *clean = NULL; | |
1977 | ||
572e2857 BB |
1978 | /* |
1979 | * Ensure the data of a renamed file is committed before the rename. | |
1980 | */ | |
1981 | if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) | |
1982 | zil_async_to_sync(zilog, itx->itx_oid); | |
1983 | ||
29809a6c | 1984 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) |
572e2857 BB |
1985 | txg = ZILTEST_TXG; |
1986 | else | |
1987 | txg = dmu_tx_get_txg(tx); | |
1988 | ||
1989 | itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
1990 | mutex_enter(&itxg->itxg_lock); | |
1991 | itxs = itxg->itxg_itxs; | |
1992 | if (itxg->itxg_txg != txg) { | |
1993 | if (itxs != NULL) { | |
1994 | /* | |
1995 | * The zil_clean callback hasn't got around to cleaning | |
1996 | * this itxg. Save the itxs for release below. | |
1997 | * This should be rare. | |
1998 | */ | |
55922e73 | 1999 | zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " |
8e739b2c | 2000 | "txg %llu", (u_longlong_t)itxg->itxg_txg); |
572e2857 BB |
2001 | clean = itxg->itxg_itxs; |
2002 | } | |
572e2857 | 2003 | itxg->itxg_txg = txg; |
d1d7e268 | 2004 | itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), |
79c76d5b | 2005 | KM_SLEEP); |
572e2857 BB |
2006 | |
2007 | list_create(&itxs->i_sync_list, sizeof (itx_t), | |
2008 | offsetof(itx_t, itx_node)); | |
2009 | avl_create(&itxs->i_async_tree, zil_aitx_compare, | |
2010 | sizeof (itx_async_node_t), | |
2011 | offsetof(itx_async_node_t, ia_node)); | |
2012 | } | |
2013 | if (itx->itx_sync) { | |
2014 | list_insert_tail(&itxs->i_sync_list, itx); | |
572e2857 BB |
2015 | } else { |
2016 | avl_tree_t *t = &itxs->i_async_tree; | |
50c957f7 NB |
2017 | uint64_t foid = |
2018 | LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid); | |
572e2857 BB |
2019 | itx_async_node_t *ian; |
2020 | avl_index_t where; | |
2021 | ||
2022 | ian = avl_find(t, &foid, &where); | |
2023 | if (ian == NULL) { | |
d1d7e268 | 2024 | ian = kmem_alloc(sizeof (itx_async_node_t), |
79c76d5b | 2025 | KM_SLEEP); |
572e2857 BB |
2026 | list_create(&ian->ia_list, sizeof (itx_t), |
2027 | offsetof(itx_t, itx_node)); | |
2028 | ian->ia_foid = foid; | |
2029 | avl_insert(t, ian, where); | |
2030 | } | |
2031 | list_insert_tail(&ian->ia_list, itx); | |
2032 | } | |
2033 | ||
2034 | itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); | |
1ce23dca PS |
2035 | |
2036 | /* | |
2037 | * We don't want to dirty the ZIL using ZILTEST_TXG, because | |
2038 | * zil_clean() will never be called using ZILTEST_TXG. Thus, we | |
2039 | * need to be careful to always dirty the ZIL using the "real" | |
2040 | * TXG (not itxg_txg) even when the SPA is frozen. | |
2041 | */ | |
2042 | zilog_dirty(zilog, dmu_tx_get_txg(tx)); | |
572e2857 BB |
2043 | mutex_exit(&itxg->itxg_lock); |
2044 | ||
2045 | /* Release the old itxs now we've dropped the lock */ | |
2046 | if (clean != NULL) | |
2047 | zil_itxg_clean(clean); | |
2048 | } | |
2049 | ||
34dc7c2f BB |
2050 | /* |
2051 | * If there are any in-memory intent log transactions which have now been | |
29809a6c | 2052 | * synced, then start up a taskq to free them. We should only do this after we | |
e1cfd73f | 2053 | * have written out the uberblocks (i.e. txg has been committed) so that |
29809a6c MA |
2054 | * we don't inadvertently clean out in-memory log records that would be required | |
2055 | * by zil_commit(). | |
34dc7c2f BB |
2056 | */ |
2057 | void | |
572e2857 | 2058 | zil_clean(zilog_t *zilog, uint64_t synced_txg) |
34dc7c2f | 2059 | { |
572e2857 BB |
2060 | itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; |
2061 | itxs_t *clean_me; | |
34dc7c2f | 2062 | |
1ce23dca PS |
2063 | ASSERT3U(synced_txg, <, ZILTEST_TXG); |
2064 | ||
572e2857 BB |
2065 | mutex_enter(&itxg->itxg_lock); |
2066 | if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { | |
2067 | mutex_exit(&itxg->itxg_lock); | |
2068 | return; | |
2069 | } | |
2070 | ASSERT3U(itxg->itxg_txg, <=, synced_txg); | |
a032ac4b | 2071 | ASSERT3U(itxg->itxg_txg, !=, 0); |
572e2857 BB |
2072 | clean_me = itxg->itxg_itxs; |
2073 | itxg->itxg_itxs = NULL; | |
2074 | itxg->itxg_txg = 0; | |
2075 | mutex_exit(&itxg->itxg_lock); | |
2076 | /* | |
2077 | * Preferably start a task queue to free up the old itxs but | |
2078 | * if taskq_dispatch can't allocate resources to do that then | |
2079 | * free them inline. This should be rare. Note that using TQ_SLEEP | |
2080 | * created a bad performance problem. | |
2081 | */ | |
a032ac4b BB |
2082 | ASSERT3P(zilog->zl_dmu_pool, !=, NULL); |
2083 | ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); | |
2084 | taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, | |
23c13c7e | 2085 | zil_itxg_clean, clean_me, TQ_NOSLEEP); |
a032ac4b | 2086 | if (id == TASKQID_INVALID) |
572e2857 BB |
2087 | zil_itxg_clean(clean_me); |
2088 | } | |
2089 | ||
2090 | /* | |
1ce23dca PS |
2091 | * This function will traverse the queue of itxs that need to be |
2092 | * committed, and move them onto the ZIL's zl_itx_commit_list. | |
572e2857 BB |
2093 | */ |
2094 | static void | |
2095 | zil_get_commit_list(zilog_t *zilog) | |
2096 | { | |
2097 | uint64_t otxg, txg; | |
2098 | list_t *commit_list = &zilog->zl_itx_commit_list; | |
572e2857 | 2099 | |
1b2b0aca | 2100 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 2101 | |
572e2857 BB |
2102 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ |
2103 | otxg = ZILTEST_TXG; | |
2104 | else | |
2105 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
2106 | ||
55922e73 GW |
2107 | /* |
2108 | * This is inherently racy, since there is nothing to prevent | |
2109 | * the last synced txg from changing. That's okay since we'll | |
2110 | * only commit things in the future. | |
2111 | */ | |
572e2857 BB |
2112 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2113 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2114 | ||
2115 | mutex_enter(&itxg->itxg_lock); | |
2116 | if (itxg->itxg_txg != txg) { | |
2117 | mutex_exit(&itxg->itxg_lock); | |
2118 | continue; | |
2119 | } | |
2120 | ||
55922e73 GW |
2121 | /* |
2122 | * If we're adding itx records to the zl_itx_commit_list, | |
2123 | * then the zil better be dirty in this "txg". We can assert | |
2124 | * that here since we're holding the itxg_lock which will | |
2125 | * prevent spa_sync from cleaning it. Once we add the itxs | |
2126 | * to the zl_itx_commit_list we must commit it to disk even | |
2127 | * if it's unnecessary (i.e. the txg was synced). | |
2128 | */ | |
2129 | ASSERT(zilog_is_dirty_in_txg(zilog, txg) || | |
2130 | spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); | |
572e2857 | 2131 | list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list); |
572e2857 BB |
2132 | |
2133 | mutex_exit(&itxg->itxg_lock); | |
2134 | } | |
572e2857 BB |
2135 | } |
2136 | ||
2137 | /* | |
2138 | * Move the async itxs for a specified object to commit into sync lists. | |
2139 | */ | |
eedb3a62 | 2140 | void |
572e2857 BB |
2141 | zil_async_to_sync(zilog_t *zilog, uint64_t foid) |
2142 | { | |
2143 | uint64_t otxg, txg; | |
2144 | itx_async_node_t *ian; | |
2145 | avl_tree_t *t; | |
2146 | avl_index_t where; | |
2147 | ||
2148 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ | |
2149 | otxg = ZILTEST_TXG; | |
2150 | else | |
2151 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
2152 | ||
55922e73 GW |
2153 | /* |
2154 | * This is inherently racy, since there is nothing to prevent | |
2155 | * the last synced txg from changing. | |
2156 | */ | |
572e2857 BB |
2157 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2158 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2159 | ||
2160 | mutex_enter(&itxg->itxg_lock); | |
2161 | if (itxg->itxg_txg != txg) { | |
2162 | mutex_exit(&itxg->itxg_lock); | |
2163 | continue; | |
2164 | } | |
2165 | ||
2166 | /* | |
2167 | * If a foid is specified then find that node and append its | |
2168 | * list. Otherwise walk the tree appending all the lists | |
2169 | * to the sync list. We add to the end rather than the | |
2170 | * beginning to ensure the create has happened. | |
2171 | */ | |
2172 | t = &itxg->itxg_itxs->i_async_tree; | |
2173 | if (foid != 0) { | |
2174 | ian = avl_find(t, &foid, &where); | |
2175 | if (ian != NULL) { | |
2176 | list_move_tail(&itxg->itxg_itxs->i_sync_list, | |
2177 | &ian->ia_list); | |
2178 | } | |
2179 | } else { | |
2180 | void *cookie = NULL; | |
2181 | ||
2182 | while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { | |
2183 | list_move_tail(&itxg->itxg_itxs->i_sync_list, | |
2184 | &ian->ia_list); | |
2185 | list_destroy(&ian->ia_list); | |
2186 | kmem_free(ian, sizeof (itx_async_node_t)); | |
2187 | } | |
2188 | } | |
2189 | mutex_exit(&itxg->itxg_lock); | |
34dc7c2f | 2190 | } |
34dc7c2f BB |
2191 | } |
2192 | ||
1ce23dca PS |
2193 | /* |
2194 | * This function will prune commit itxs that are at the head of the | |
2195 | * commit list (it won't prune past the first non-commit itx), and | |
2196 | * either: a) attach them to the last lwb that's still pending | |
2197 | * completion, or b) skip them altogether. | |
2198 | * | |
2199 | * This is used as a performance optimization to prevent commit itxs | |
2200 | * from generating new lwbs when it's unnecessary to do so. | |
2201 | */ | |
b128c09f | 2202 | static void |
1ce23dca | 2203 | zil_prune_commit_list(zilog_t *zilog) |
34dc7c2f | 2204 | { |
572e2857 | 2205 | itx_t *itx; |
34dc7c2f | 2206 | |
1b2b0aca | 2207 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
572e2857 | 2208 | |
1ce23dca PS |
2209 | while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { |
2210 | lr_t *lrc = &itx->itx_lr; | |
2211 | if (lrc->lrc_txtype != TX_COMMIT) | |
2212 | break; | |
572e2857 | 2213 | |
1ce23dca PS |
2214 | mutex_enter(&zilog->zl_lock); |
2215 | ||
2216 | lwb_t *last_lwb = zilog->zl_last_lwb_opened; | |
900d09b2 PS |
2217 | if (last_lwb == NULL || |
2218 | last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) { | |
1ce23dca PS |
2219 | /* |
2220 | * All of the itxs this waiter was waiting on | |
2221 | * must have already completed (or there were | |
2222 | * never any itx's for it to wait on), so it's | |
2223 | * safe to skip this waiter and mark it done. | |
2224 | */ | |
2225 | zil_commit_waiter_skip(itx->itx_private); | |
2226 | } else { | |
2227 | zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); | |
2228 | itx->itx_private = NULL; | |
2229 | } | |
2230 | ||
2231 | mutex_exit(&zilog->zl_lock); | |
2232 | ||
2233 | list_remove(&zilog->zl_itx_commit_list, itx); | |
2234 | zil_itx_destroy(itx); | |
2235 | } | |
2236 | ||
2237 | IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); | |
2238 | } | |
2239 | ||
2240 | static void | |
2241 | zil_commit_writer_stall(zilog_t *zilog) | |
2242 | { | |
2243 | /* | |
2244 | * When zio_alloc_zil() fails to allocate the next lwb block on | |
2245 | * disk, we must call txg_wait_synced() to ensure all of the | |
2246 | * lwbs in the zilog's zl_lwb_list are synced and then freed (in | |
2247 | * zil_sync()), such that any subsequent ZIL writer (i.e. a call | |
2248 | * to zil_process_commit_list()) will have to call zil_create(), | |
2249 | * and start a new ZIL chain. | |
2250 | * | |
2251 | * Since zio_alloc_zil() failed, the lwb that was previously | |
2252 | * issued does not have a pointer to the "next" lwb on disk. | |
2253 | * Thus, if another ZIL writer thread was to allocate the "next" | |
2254 | * on-disk lwb, that block could be leaked in the event of a | |
2255 | * crash (because the previous lwb on-disk would not point to | |
2256 | * it). | |
2257 | * | |
1b2b0aca | 2258 | * We must hold the zilog's zl_issuer_lock while we do this, to |
1ce23dca PS |
2259 | * ensure no new threads enter zil_process_commit_list() until |
2260 | * all lwb's in the zl_lwb_list have been synced and freed | |
2261 | * (which is achieved via the txg_wait_synced() call). | |
2262 | */ | |
1b2b0aca | 2263 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
2264 | txg_wait_synced(zilog->zl_dmu_pool, 0); |
2265 | ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); | |
2266 | } | |
2267 | ||
2268 | /* | |
2269 | * This function will traverse the commit list, creating new lwbs as | |
2270 | * needed, and committing the itxs from the commit list to these newly | |
2271 | * created lwbs. Additionally, as a new lwb is created, the previous | |
2272 | * lwb will be issued to the zio layer to be written to disk. | |
2273 | */ | |
2274 | static void | |
2275 | zil_process_commit_list(zilog_t *zilog) | |
2276 | { | |
2277 | spa_t *spa = zilog->zl_spa; | |
2278 | list_t nolwb_itxs; | |
2279 | list_t nolwb_waiters; | |
2280 | lwb_t *lwb; | |
2281 | itx_t *itx; | |
2282 | ||
1b2b0aca | 2283 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
572e2857 BB |
2284 | |
2285 | /* | |
2286 | * Return if there's nothing to commit before we dirty the fs by | |
2287 | * calling zil_create(). | |
2288 | */ | |
1ce23dca | 2289 | if (list_head(&zilog->zl_itx_commit_list) == NULL) |
572e2857 | 2290 | return; |
34dc7c2f | 2291 | |
1ce23dca PS |
2292 | list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); |
2293 | list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), | |
2294 | offsetof(zil_commit_waiter_t, zcw_node)); | |
2295 | ||
2296 | lwb = list_tail(&zilog->zl_lwb_list); | |
2297 | if (lwb == NULL) { | |
2298 | lwb = zil_create(zilog); | |
34dc7c2f | 2299 | } else { |
1ce23dca | 2300 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); |
900d09b2 PS |
2301 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); |
2302 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
34dc7c2f BB |
2303 | } |
2304 | ||
1ce23dca PS |
2305 | while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { |
2306 | lr_t *lrc = &itx->itx_lr; | |
2307 | uint64_t txg = lrc->lrc_txg; | |
2308 | ||
55922e73 | 2309 | ASSERT3U(txg, !=, 0); |
34dc7c2f | 2310 | |
1ce23dca PS |
2311 | if (lrc->lrc_txtype == TX_COMMIT) { |
2312 | DTRACE_PROBE2(zil__process__commit__itx, | |
2313 | zilog_t *, zilog, itx_t *, itx); | |
2314 | } else { | |
2315 | DTRACE_PROBE2(zil__process__normal__itx, | |
2316 | zilog_t *, zilog, itx_t *, itx); | |
2317 | } | |
2318 | ||
2319 | list_remove(&zilog->zl_itx_commit_list, itx); | |
2320 | ||
1ce23dca PS |
2321 | boolean_t synced = txg <= spa_last_synced_txg(spa); |
2322 | boolean_t frozen = txg > spa_freeze_txg(spa); | |
2323 | ||
2fe61a7e PS |
2324 | /* |
2325 | * If the txg of this itx has already been synced out, then | |
2326 | * we don't need to commit this itx to an lwb. This is | |
2327 | * because the data of this itx will have already been | |
2328 | * written to the main pool. This is inherently racy, and | |
2329 | * it's still ok to commit an itx whose txg has already | |
2330 | * been synced; this will result in a write that's | |
2331 | * unnecessary, but will do no harm. | |
2332 | * | |
2333 | * With that said, we always want to commit TX_COMMIT itxs | |
2334 | * to an lwb, regardless of whether or not that itx's txg | |
2335 | * has been synced out. We do this to ensure any OPENED lwb | |
2336 | * will always have at least one zil_commit_waiter_t linked | |
2337 | * to the lwb. | |
2338 | * | |
2339 | * As a counter-example, if we skipped TX_COMMIT itx's | |
2340 | * whose txg had already been synced, the following | |
2341 | * situation could occur if we happened to be racing with | |
2342 | * spa_sync: | |
2343 | * | |
2344 | * 1. We commit a non-TX_COMMIT itx to an lwb, where the | |
2345 | * itx's txg is 10 and the last synced txg is 9. | |
2346 | * 2. spa_sync finishes syncing out txg 10. | |
2347 | * 3. We move to the next itx in the list, it's a TX_COMMIT | |
2348 | * whose txg is 10, so we skip it rather than committing | |
2349 | * it to the lwb used in (1). | |
2350 | * | |
2351 | * If the itx that is skipped in (3) is the last TX_COMMIT | |
2352 | * itx in the commit list, then it's possible for the lwb | |
2353 | * used in (1) to remain in the OPENED state indefinitely. | |
2354 | * | |
2355 | * To prevent the above scenario from occurring, and to ensure | |
2356 | * that once an lwb is OPENED it will transition to ISSUED | |
2357 | * and eventually DONE, we always commit TX_COMMIT itx's to | |
2358 | * an lwb here, even if that itx's txg has already been | |
2359 | * synced. | |
2360 | * | |
2361 | * Finally, if the pool is frozen, we _always_ commit the | |
2362 | * itx. The point of freezing the pool is to prevent data | |
2363 | * from being written to the main pool via spa_sync, and | |
2364 | * instead rely solely on the ZIL to persistently store the | |
2365 | * data; i.e. when the pool is frozen, the last synced txg | |
2366 | * value can't be trusted. | |
2367 | */ | |
2368 | if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { | |
1ce23dca PS |
2369 | if (lwb != NULL) { |
2370 | lwb = zil_lwb_commit(zilog, itx, lwb); | |
2371 | ||
2372 | if (lwb == NULL) | |
2373 | list_insert_tail(&nolwb_itxs, itx); | |
2374 | else | |
2375 | list_insert_tail(&lwb->lwb_itxs, itx); | |
2376 | } else { | |
2377 | if (lrc->lrc_txtype == TX_COMMIT) { | |
2378 | zil_commit_waiter_link_nolwb( | |
2379 | itx->itx_private, &nolwb_waiters); | |
2380 | } | |
2381 | ||
2382 | list_insert_tail(&nolwb_itxs, itx); | |
2383 | } | |
2384 | } else { | |
2fe61a7e | 2385 | ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT); |
1ce23dca PS |
2386 | zil_itx_destroy(itx); |
2387 | } | |
34dc7c2f | 2388 | } |
34dc7c2f | 2389 | |
1ce23dca PS |
2390 | if (lwb == NULL) { |
2391 | /* | |
2392 | * This indicates zio_alloc_zil() failed to allocate the | |
2393 | * "next" lwb on-disk. When this happens, we must stall | |
2394 | * the ZIL write pipeline; see the comment within | |
2395 | * zil_commit_writer_stall() for more details. | |
2396 | */ | |
2397 | zil_commit_writer_stall(zilog); | |
34dc7c2f | 2398 | |
1ce23dca PS |
2399 | /* |
2400 | * Additionally, we have to signal and mark the "nolwb" | |
2401 | * waiters as "done" here, since without an lwb, we | |
2402 | * can't do this via zil_lwb_flush_vdevs_done() like | |
2403 | * normal. | |
2404 | */ | |
2405 | zil_commit_waiter_t *zcw; | |
2406 | while ((zcw = list_head(&nolwb_waiters)) != NULL) { | |
2407 | zil_commit_waiter_skip(zcw); | |
2408 | list_remove(&nolwb_waiters, zcw); | |
2409 | } | |
2410 | ||
2411 | /* | |
2412 | * And finally, we have to destroy the itx's that | |
2413 | * couldn't be committed to an lwb; this will also call | |
2414 | * the itx's callback if one exists for the itx. | |
2415 | */ | |
2416 | while ((itx = list_head(&nolwb_itxs)) != NULL) { | |
2417 | list_remove(&nolwb_itxs, itx); | |
2418 | zil_itx_destroy(itx); | |
2419 | } | |
2420 | } else { | |
2421 | ASSERT(list_is_empty(&nolwb_waiters)); | |
2422 | ASSERT3P(lwb, !=, NULL); | |
2423 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); | |
900d09b2 PS |
2424 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); |
2425 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
1ce23dca PS |
2426 | |
2427 | /* | |
2428 | * At this point, the ZIL block pointed at by the "lwb" | |
2429 | * variable is in one of the following states: "closed" | |
2430 | * or "open". | |
2431 | * | |
2fe61a7e PS |
2432 | * If it's "closed", then no itxs have been committed to |
2433 | * it, so there's no point in issuing its zio (i.e. it's | |
2434 | * "empty"). | |
1ce23dca | 2435 | * |
2fe61a7e PS |
2436 | * If it's "open", then it contains one or more itxs that |
2437 | * eventually need to be committed to stable storage. In | |
2438 | * this case we intentionally do not issue the lwb's zio | |
2439 | * to disk yet, and instead rely on one of the following | |
2440 | * two mechanisms for issuing the zio: | |
1ce23dca | 2441 | * |
2fe61a7e | 2442 | * 1. Ideally, there will be more ZIL activity occurring |
1ce23dca | 2443 | * on the system, such that this function will be |
2fe61a7e | 2444 | * immediately called again (not necessarily by the same |
1ce23dca PS |
2445 | * thread) and this lwb's zio will be issued via |
2446 | * zil_lwb_commit(). This way, the lwb is guaranteed to | |
2447 | * be "full" when it is issued to disk, and we'll make | |
2448 | * use of the lwb's size the best we can. | |
2449 | * | |
2fe61a7e | 2450 | * 2. If there isn't sufficient ZIL activity occurring on |
1ce23dca PS |
2451 | * the system, such that this lwb's zio isn't issued via |
2452 | * zil_lwb_commit(), zil_commit_waiter() will issue the | |
2453 | * lwb's zio. If this occurs, the lwb is not guaranteed | |
2454 | * to be "full" by the time its zio is issued, and means | |
2455 | * the size of the lwb was "too large" given the amount | |
2fe61a7e | 2456 | * of ZIL activity occurring on the system at that time. |
1ce23dca PS |
2457 | * |
2458 | * We do this for a couple of reasons: | |
2459 | * | |
2460 | * 1. To try and reduce the number of IOPs needed to | |
2461 | * write the same number of itxs. If an lwb has space | |
2fe61a7e | 2462 | * available in its buffer for more itxs, and more itxs |
1ce23dca PS |
2463 | * will be committed relatively soon (relative to the |
2464 | * latency of performing a write), then it's beneficial | |
2465 | * to wait for these "next" itxs. This way, more itxs | |
2466 | * can be committed to stable storage with fewer writes. | |
2467 | * | |
2468 | * 2. To try and use the largest lwb block size that the | |
2469 | * incoming rate of itxs can support. Again, this is to | |
2470 | * try and pack as many itxs into as few lwbs as | |
2471 | * possible, without significantly impacting the latency | |
2472 | * of each individual itx. | |
2473 | */ | |
2474 | } | |
2475 | } | |
2476 | ||
2477 | /* | |
2478 | * This function is responsible for ensuring the passed in commit waiter | |
2479 | * (and associated commit itx) is committed to an lwb. If the waiter is | |
2480 | * not already committed to an lwb, all itxs in the zilog's queue of | |
2481 | * itxs will be processed. The assumption is the passed in waiter's | |
2482 | * commit itx will found in the queue just like the other non-commit | |
2483 | * itxs, such that when the entire queue is processed, the waiter will | |
2fe61a7e | 2484 | * have been committed to an lwb. |
1ce23dca PS |
2485 | * |
2486 | * The lwb associated with the passed in waiter is not guaranteed to | |
2487 | * have been issued by the time this function completes. If the lwb is | |
2488 | * not issued, we rely on future calls to zil_commit_writer() to issue | |
2489 | * the lwb, or the timeout mechanism found in zil_commit_waiter(). | |
2490 | */ | |
2491 | static void | |
2492 | zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
2493 | { | |
2494 | ASSERT(!MUTEX_HELD(&zilog->zl_lock)); | |
2495 | ASSERT(spa_writeable(zilog->zl_spa)); | |
1ce23dca | 2496 | |
1b2b0aca | 2497 | mutex_enter(&zilog->zl_issuer_lock); |
1ce23dca PS |
2498 | |
2499 | if (zcw->zcw_lwb != NULL || zcw->zcw_done) { | |
2500 | /* | |
2501 | * It's possible that, while we were waiting to acquire | |
1b2b0aca | 2502 | * the "zl_issuer_lock", another thread committed this |
1ce23dca PS |
2503 | * waiter to an lwb. If that occurs, we bail out early, |
2504 | * without processing any of the zilog's queue of itxs. | |
2505 | * | |
2506 | * On certain workloads and system configurations, the | |
1b2b0aca | 2507 | * "zl_issuer_lock" can become highly contended. In an |
1ce23dca PS |
2508 | * attempt to reduce this contention, we immediately drop |
2509 | * the lock if the waiter has already been processed. | |
2510 | * | |
2511 | * We've measured this optimization to reduce the CPU time spent | |
2512 | * contending on this lock by up to 5%, using a system | |
2513 | * with 32 CPUs, low latency storage (~50 usec writes), | |
2514 | * and 1024 threads performing sync writes. | |
2515 | */ | |
2516 | goto out; | |
2517 | } | |
2518 | ||
2519 | ZIL_STAT_BUMP(zil_commit_writer_count); | |
2520 | ||
2521 | zil_get_commit_list(zilog); | |
2522 | zil_prune_commit_list(zilog); | |
2523 | zil_process_commit_list(zilog); | |
2524 | ||
2525 | out: | |
1b2b0aca | 2526 | mutex_exit(&zilog->zl_issuer_lock); |
1ce23dca PS |
2527 | } |
2528 | ||
2529 | static void | |
2530 | zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
2531 | { | |
1b2b0aca | 2532 | ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
2533 | ASSERT(MUTEX_HELD(&zcw->zcw_lock)); |
2534 | ASSERT3B(zcw->zcw_done, ==, B_FALSE); | |
2535 | ||
2536 | lwb_t *lwb = zcw->zcw_lwb; | |
2537 | ASSERT3P(lwb, !=, NULL); | |
2538 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); | |
34dc7c2f BB |
2539 | |
2540 | /* | |
1ce23dca PS |
2541 | * If the lwb has already been issued by another thread, we can |
2542 | * immediately return since there's no work to be done (the | |
2543 | * point of this function is to issue the lwb). Additionally, we | |
1b2b0aca | 2544 | * do this prior to acquiring the zl_issuer_lock, to avoid |
1ce23dca | 2545 | * acquiring it when it's not necessary to do so. |
34dc7c2f | 2546 | */ |
1ce23dca | 2547 | if (lwb->lwb_state == LWB_STATE_ISSUED || |
900d09b2 PS |
2548 | lwb->lwb_state == LWB_STATE_WRITE_DONE || |
2549 | lwb->lwb_state == LWB_STATE_FLUSH_DONE) | |
1ce23dca | 2550 | return; |
34dc7c2f | 2551 | |
1ce23dca PS |
2552 | /* |
2553 | * In order to call zil_lwb_write_issue() we must hold the | |
1b2b0aca | 2554 | * zilog's "zl_issuer_lock". We can't simply acquire that lock, |
1ce23dca | 2555 | * since we're already holding the commit waiter's "zcw_lock", |
2fe61a7e | 2556 | * and those two locks are acquired in the opposite order |
1ce23dca PS |
2557 | * elsewhere. |
2558 | */ | |
2559 | mutex_exit(&zcw->zcw_lock); | |
1b2b0aca | 2560 | mutex_enter(&zilog->zl_issuer_lock); |
1ce23dca | 2561 | mutex_enter(&zcw->zcw_lock); |
34dc7c2f | 2562 | |
1ce23dca PS |
2563 | /* |
2564 | * Since we just dropped and re-acquired the commit waiter's | |
2565 | * lock, we have to re-check to see if the waiter was marked | |
2566 | * "done" during that process. If the waiter was marked "done", | |
2567 | * the "lwb" pointer is no longer valid (it can be free'd after | |
2568 | * the waiter is marked "done"), so without this check we could | |
2569 | * wind up with a use-after-free error below. | |
2570 | */ | |
2571 | if (zcw->zcw_done) | |
2572 | goto out; | |
119a394a | 2573 | |
1ce23dca PS |
2574 | ASSERT3P(lwb, ==, zcw->zcw_lwb); |
2575 | ||
2576 | /* | |
2fe61a7e PS |
2577 | * We've already checked this above, but since we hadn't acquired |
2578 | * the zilog's zl_issuer_lock, we have to perform this check a | |
2579 | * second time while holding the lock. | |
2580 | * | |
2581 | * We don't need to hold the zl_lock since the lwb cannot transition | |
2582 | * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb | |
2583 | * _can_ transition from ISSUED to DONE, but it's OK to race with | |
2584 | * that transition since we treat the lwb the same, whether it's in | |
2585 | * the ISSUED or DONE states. | |
2586 | * | |
2587 | * The important thing is that we treat the lwb differently depending | |
2588 | * on whether it's ISSUED or OPENED, and block any other threads that might | |
2589 | * attempt to issue this lwb. For that reason we hold the | |
2590 | * zl_issuer_lock when checking the lwb_state; we must not call | |
1ce23dca | 2591 | * zil_lwb_write_issue() if the lwb had already been issued. |
2fe61a7e PS |
2592 | * |
2593 | * See the comment above the lwb_state_t structure definition for | |
2594 | * more details on the lwb states, and locking requirements. | |
1ce23dca PS |
2595 | */ |
2596 | if (lwb->lwb_state == LWB_STATE_ISSUED || | |
900d09b2 PS |
2597 | lwb->lwb_state == LWB_STATE_WRITE_DONE || |
2598 | lwb->lwb_state == LWB_STATE_FLUSH_DONE) | |
1ce23dca PS |
2599 | goto out; |
2600 | ||
2601 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); | |
2602 | ||
2603 | /* | |
2604 | * As described in the comments above zil_commit_waiter() and | |
2605 | * zil_process_commit_list(), we need to issue this lwb's zio | |
2606 | * since we've reached the commit waiter's timeout and it still | |
2607 | * hasn't been issued. | |
2608 | */ | |
2609 | lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb); | |
2610 | ||
ab119165 | 2611 | IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED); |
1ce23dca PS |
2612 | |
2613 | /* | |
2614 | * Since the lwb's zio hadn't been issued by the time this thread | |
2615 | * reached its timeout, we reset the zilog's "zl_cur_used" field | |
2616 | * to influence the zil block size selection algorithm. | |
2617 | * | |
2618 | * By having to issue the lwb's zio here, it means the size of the | |
2619 | * lwb was too large, given the incoming throughput of itxs. By | |
2620 | * setting "zl_cur_used" to zero, we communicate this fact to the | |
2fe61a7e | 2621 | * block size selection algorithm, so it can take this information |
1ce23dca PS |
2622 | * into account, and potentially select a smaller size for the |
2623 | * next lwb block that is allocated. | |
2624 | */ | |
2625 | zilog->zl_cur_used = 0; | |
2626 | ||
2627 | if (nlwb == NULL) { | |
2628 | /* | |
2629 | * When zil_lwb_write_issue() returns NULL, this | |
2630 | * indicates zio_alloc_zil() failed to allocate the | |
2631 | * "next" lwb on-disk. When this occurs, the ZIL write | |
2632 | * pipeline must be stalled; see the comment within the | |
2633 | * zil_commit_writer_stall() function for more details. | |
2634 | * | |
2635 | * We must drop the commit waiter's lock prior to | |
2636 | * calling zil_commit_writer_stall() or else we can wind | |
2637 | * up with the following deadlock: | |
2638 | * | |
2639 | * - This thread is waiting for the txg to sync while | |
2640 | * holding the waiter's lock; txg_wait_synced() is | |
2641 | * used within zil_commit_writer_stall(). | 
2642 | * | |
2643 | * - The txg can't sync because it is waiting for this | |
2644 | * lwb's zio callback to call dmu_tx_commit(). | |
2645 | * | |
2646 | * - The lwb's zio callback can't call dmu_tx_commit() | |
2647 | * because it's blocked trying to acquire the waiter's | |
2648 | * lock, which occurs prior to calling dmu_tx_commit(). | 
2649 | */ | |
2650 | mutex_exit(&zcw->zcw_lock); | |
2651 | zil_commit_writer_stall(zilog); | |
2652 | mutex_enter(&zcw->zcw_lock); | |
119a394a ED |
2653 | } |
2654 | ||
1ce23dca | 2655 | out: |
1b2b0aca | 2656 | mutex_exit(&zilog->zl_issuer_lock); |
1ce23dca PS |
2657 | ASSERT(MUTEX_HELD(&zcw->zcw_lock)); |
2658 | } | |
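/*
 * Editorial sketch (not part of the original source): the function
 * above illustrates the "re-check after re-acquiring" idiom -- any
 * state read under a lock must be re-validated once the lock has
 * been dropped and re-taken, since a completion path may have
 * invalidated it (here, marking the waiter "done" and freeing the
 * lwb). A minimal form of the idiom, assuming only the SPL
 * primitives from sys/zfs_context.h ("do_blocking_work" is a
 * hypothetical placeholder):
 */
#if 0	/* illustrative only */
static void
recheck_after_relock(kmutex_t *lock, boolean_t *done,
    void (*do_blocking_work)(void))
{
	ASSERT(MUTEX_HELD(lock));

	mutex_exit(lock);
	do_blocking_work();	/* may sleep; "*done" may change here */
	mutex_enter(lock);

	if (*done)
		return;		/* any cached pointers are now stale */

	/* ... only here is it safe to dereference cached state ... */
}
#endif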
2659 | ||
2660 | /* | |
2661 | * This function is responsible for performing the following two tasks: | |
2662 | * | |
2663 | * 1. its primary responsibility is to block until the given "commit | |
2664 | * waiter" is considered "done". | |
2665 | * | |
2666 | * 2. its secondary responsibility is to issue the zio for the lwb that | |
2667 | * the given "commit waiter" is waiting on, if this function has | |
2668 | * waited "long enough" and the lwb is still in the "open" state. | |
2669 | * | |
2670 | * Given a sufficient number of itxs being generated and written using | 
2671 | * the ZIL, the lwb's zio will be issued via the zil_lwb_commit() | |
2672 | * function. If this does not occur, this secondary responsibility will | |
2673 | * ensure the lwb is issued even if there is no other synchronous | 
2674 | * activity on the system. | |
2675 | * | |
2676 | * For more details, see zil_process_commit_list(); more specifically, | |
2677 | * the comment at the bottom of that function. | |
2678 | */ | |
2679 | static void | |
2680 | zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
2681 | { | |
2682 | ASSERT(!MUTEX_HELD(&zilog->zl_lock)); | |
1b2b0aca | 2683 | ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 2684 | ASSERT(spa_writeable(zilog->zl_spa)); |
1ce23dca PS |
2685 | |
2686 | mutex_enter(&zcw->zcw_lock); | |
428870ff BB |
2687 | |
2688 | /* | |
1ce23dca PS |
2689 | * The timeout is scaled based on the lwb latency to avoid |
2690 | * significantly impacting the latency of each individual itx. | |
2691 | * For more details, see the comment at the bottom of the | |
2692 | * zil_process_commit_list() function. | |
428870ff | 2693 | */ |
1ce23dca PS |
2694 | int pct = MAX(zfs_commit_timeout_pct, 1); |
2695 | hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; | |
2696 | hrtime_t wakeup = gethrtime() + sleep; | |
2697 | boolean_t timedout = B_FALSE; | |
2698 | ||
2699 | while (!zcw->zcw_done) { | |
2700 | ASSERT(MUTEX_HELD(&zcw->zcw_lock)); | |
2701 | ||
2702 | lwb_t *lwb = zcw->zcw_lwb; | |
2703 | ||
2704 | /* | |
2705 | * Usually, the waiter will have a non-NULL lwb field here, | |
2706 | * but it's possible for it to be NULL as a result of | |
2707 | * zil_commit() racing with spa_sync(). | |
2708 | * | |
2709 | * When zil_clean() is called, it's possible for the itxg | |
2710 | * list (which may be cleaned via a taskq) to contain | |
2711 | * commit itxs. When this occurs, the commit waiters linked | |
2712 | * off of these commit itxs will not be committed to an | |
2713 | * lwb. Additionally, these commit waiters will not be | |
2714 | * marked done until zil_commit_waiter_skip() is called via | |
2715 | * zil_itxg_clean(). | |
2716 | * | |
2717 | * Thus, it's possible for this commit waiter (i.e. the | |
2718 | * "zcw" variable) to be found in this "in between" state; | |
2719 | * where it's "zcw_lwb" field is NULL, and it hasn't yet | |
2720 | * been skipped, so it's "zcw_done" field is still B_FALSE. | |
2721 | */ | |
2722 | IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); | |
2723 | ||
2724 | if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { | |
2725 | ASSERT3B(timedout, ==, B_FALSE); | |
2726 | ||
2727 | /* | |
2728 | * If the lwb hasn't been issued yet, then we | |
2729 | * need to wait with a timeout, in case this | |
2730 | * function needs to issue the lwb after the | |
2731 | * timeout is reached; responsibility (2) from | |
2732 | * the comment above this function. | |
2733 | */ | |
8056a756 | 2734 | int rc = cv_timedwait_hires(&zcw->zcw_cv, |
1ce23dca PS |
2735 | &zcw->zcw_lock, wakeup, USEC2NSEC(1), |
2736 | CALLOUT_FLAG_ABSOLUTE); | |
2737 | ||
8056a756 | 2738 | if (rc != -1 || zcw->zcw_done) |
1ce23dca PS |
2739 | continue; |
2740 | ||
2741 | timedout = B_TRUE; | |
2742 | zil_commit_waiter_timeout(zilog, zcw); | |
2743 | ||
2744 | if (!zcw->zcw_done) { | |
2745 | /* | |
2746 | * If the commit waiter has already been | |
2747 | * marked "done", it's possible for the | |
2748 | * waiter's lwb structure to have already | |
2749 | * been freed. Thus, we can only reliably | |
2750 | * make these assertions if the waiter | |
2751 | * isn't done. | |
2752 | */ | |
2753 | ASSERT3P(lwb, ==, zcw->zcw_lwb); | |
2754 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); | |
2755 | } | |
2756 | } else { | |
2757 | /* | |
2758 | * If the lwb isn't open, then it must have already | |
2759 | * been issued. In that case, there's no need to | |
2760 | * use a timeout when waiting for the lwb to | |
2761 | * complete. | |
2762 | * | |
2763 | * Additionally, if the lwb is NULL, the waiter | |
2fe61a7e | 2764 | * will soon be marked done and signaled via | 
1ce23dca PS |
2765 | * zil_clean() and zil_itxg_clean(), so no timeout |
2766 | * is required. | |
2767 | */ | |
2768 | ||
2769 | IMPLY(lwb != NULL, | |
2770 | lwb->lwb_state == LWB_STATE_ISSUED || | |
900d09b2 PS |
2771 | lwb->lwb_state == LWB_STATE_WRITE_DONE || |
2772 | lwb->lwb_state == LWB_STATE_FLUSH_DONE); | |
1ce23dca PS |
2773 | cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); |
2774 | } | |
2775 | } | |
2776 | ||
2777 | mutex_exit(&zcw->zcw_lock); | |
2778 | } | |
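/*
 * Editorial sketch (not part of the original source): the wait loop
 * above passes an absolute gethrtime() deadline to
 * cv_timedwait_hires() via CALLOUT_FLAG_ABSOLUTE, so spurious
 * wakeups never extend the total wait; -1 is returned only once the
 * deadline has truly passed. The idiom in isolation, assuming the
 * primitives from sys/zfs_context.h:
 */
#if 0	/* illustrative only */
static boolean_t
wait_with_deadline(kcondvar_t *cv, kmutex_t *lock, boolean_t *done,
    hrtime_t timeout)
{
	hrtime_t deadline = gethrtime() + timeout;
	boolean_t rv;

	mutex_enter(lock);
	while (!*done) {
		/* -1 means the absolute deadline has passed */
		if (cv_timedwait_hires(cv, lock, deadline,
		    USEC2NSEC(1), CALLOUT_FLAG_ABSOLUTE) == -1)
			break;
	}
	rv = *done;
	mutex_exit(lock);
	return (rv);
}
#endif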
2779 | ||
2780 | static zil_commit_waiter_t * | |
2781 | zil_alloc_commit_waiter(void) | |
2782 | { | |
2783 | zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); | |
2784 | ||
2785 | cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); | |
2786 | mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); | |
2787 | list_link_init(&zcw->zcw_node); | |
2788 | zcw->zcw_lwb = NULL; | |
2789 | zcw->zcw_done = B_FALSE; | |
2790 | zcw->zcw_zio_error = 0; | |
2791 | ||
2792 | return (zcw); | |
2793 | } | |
2794 | ||
2795 | static void | |
2796 | zil_free_commit_waiter(zil_commit_waiter_t *zcw) | |
2797 | { | |
2798 | ASSERT(!list_link_active(&zcw->zcw_node)); | |
2799 | ASSERT3P(zcw->zcw_lwb, ==, NULL); | |
2800 | ASSERT3B(zcw->zcw_done, ==, B_TRUE); | |
2801 | mutex_destroy(&zcw->zcw_lock); | |
2802 | cv_destroy(&zcw->zcw_cv); | |
2803 | kmem_cache_free(zil_zcw_cache, zcw); | |
34dc7c2f BB |
2804 | } |
2805 | ||
2806 | /* | |
1ce23dca PS |
2807 | * This function is used to create a TX_COMMIT itx and assign it. This |
2808 | * way, it will be linked into the ZIL's list of synchronous itxs, and | |
2809 | * then later committed to an lwb (or skipped) when | |
2810 | * zil_process_commit_list() is called. | |
2811 | */ | |
2812 | static void | |
2813 | zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
2814 | { | |
2815 | dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); | |
2816 | VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); | |
2817 | ||
2818 | itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); | |
2819 | itx->itx_sync = B_TRUE; | |
2820 | itx->itx_private = zcw; | |
2821 | ||
2822 | zil_itx_assign(zilog, itx, tx); | |
2823 | ||
2824 | dmu_tx_commit(tx); | |
2825 | } | |
2826 | ||
2827 | /* | |
2828 | * Commit ZFS Intent Log transactions (itxs) to stable storage. | |
2829 | * | |
2830 | * When writing ZIL transactions to the on-disk representation of the | |
2831 | * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple | |
2832 | * itxs can be committed to a single lwb. Once a lwb is written and | |
2833 | * committed to stable storage (i.e. the lwb is written, and vdevs have | |
2834 | * been flushed), each itx that was committed to that lwb is also | |
2835 | * considered to be committed to stable storage. | |
2836 | * | |
2837 | * When an itx is committed to an lwb, the log record (lr_t) contained | |
2838 | * by the itx is copied into the lwb's zio buffer, and once this buffer | |
2839 | * is written to disk, it becomes an on-disk ZIL block. | |
2840 | * | |
2841 | * As itxs are generated, they're inserted into the ZIL's queue of | |
2842 | * uncommitted itxs. The semantics of zil_commit() are such that it will | |
2843 | * block until all itxs that were in the queue when it was called, are | |
2844 | * committed to stable storage. | |
2845 | * | |
2846 | * If "foid" is zero, this means all "synchronous" and "asynchronous" | |
2847 | * itxs, for all objects in the dataset, will be committed to stable | |
2848 | * storage prior to zil_commit() returning. If "foid" is non-zero, all | |
2849 | * "synchronous" itxs for all objects, but only "asynchronous" itxs | |
2850 | * that correspond to the foid passed in, will be committed to stable | |
2851 | * storage prior to zil_commit() returning. | |
2852 | * | |
2853 | * Generally speaking, when zil_commit() is called, the consumer doesn't | |
2854 | * actually care about _all_ of the uncommitted itxs. Instead, it's | 
2855 | * simply trying to wait for a specific itx to be committed to disk, | 
2856 | * but the interface(s) for interacting with the ZIL don't allow such | |
2857 | * fine-grained communication. A better interface would allow a consumer | |
2858 | * to create and assign an itx, and then pass a reference to this itx to | |
2859 | * zil_commit(); such that zil_commit() would return as soon as that | |
2860 | * specific itx was committed to disk (instead of waiting for _all_ | |
2861 | * itxs to be committed). | |
2862 | * | |
2863 | * When a thread calls zil_commit() a special "commit itx" will be | |
2864 | * generated, along with a corresponding "waiter" for this commit itx. | |
2865 | * zil_commit() will wait on this waiter's CV, such that when the waiter | |
2fe61a7e | 2866 | * is marked done and signaled, zil_commit() will return. | 
1ce23dca PS |
2867 | * |
2868 | * This commit itx is inserted into the queue of uncommitted itxs. This | |
2869 | * provides an easy mechanism for determining which itxs were in the | |
2870 | * queue prior to zil_commit() having been called, and which itxs were | |
2871 | * added after zil_commit() was called. | |
2872 | * | |
2873 | * The commit itx is special; it doesn't have any on-disk representation. | 
2874 | * When a commit itx is "committed" to an lwb, the waiter associated | |
2875 | * with it is linked onto the lwb's list of waiters. Then, when that lwb | |
2fe61a7e | 2876 | * completes, each waiter on the lwb's list is marked done and signaled |
1ce23dca PS |
2877 | * -- allowing the thread waiting on the waiter to return from zil_commit(). |
2878 | * | |
2879 | * It's important to point out a few critical factors that allow us | |
2880 | * to make use of the commit itxs, commit waiters, per-lwb lists of | |
2881 | * commit waiters, and zio completion callbacks like we're doing: | |
572e2857 | 2882 | * |
1ce23dca | 2883 | * 1. The list of waiters for each lwb is traversed, and each commit |
2fe61a7e | 2884 | * waiter is marked "done" and signaled, in the zio completion |
1ce23dca | 2885 | * callback of the lwb's zio[*]. |
572e2857 | 2886 | * |
2fe61a7e | 2887 | * * Actually, the waiters are signaled in the zio completion |
1ce23dca PS |
2888 | * callback of the root zio for the DKIOCFLUSHWRITECACHE commands |
2889 | * that are sent to the vdevs upon completion of the lwb zio. | |
572e2857 | 2890 | * |
1ce23dca PS |
2891 | * 2. When the itxs are inserted into the ZIL's queue of uncommitted |
2892 | * itxs, the order in which they are inserted is preserved[*]; as | |
2893 | * itxs are added to the queue, they are added to the tail of | |
2894 | * in-memory linked lists. | |
572e2857 | 2895 | * |
1ce23dca PS |
2896 | * When committing the itxs to lwbs (to be written to disk), they |
2897 | * are committed in the same order in which the itxs were added to | |
2898 | * the uncommitted queue's linked list(s); i.e. the linked list of | |
2899 | * itxs to commit is traversed from head to tail, and each itx is | |
2900 | * committed to an lwb in that order. | |
2901 | * | |
2902 | * * To clarify: | |
2903 | * | |
2904 | * - the order of "sync" itxs is preserved w.r.t. other | |
2905 | * "sync" itxs, regardless of the corresponding objects. | |
2906 | * - the order of "async" itxs is preserved w.r.t. other | |
2907 | * "async" itxs corresponding to the same object. | |
2908 | * - the order of "async" itxs is *not* preserved w.r.t. other | |
2909 | * "async" itxs corresponding to different objects. | |
2910 | * - the order of "sync" itxs w.r.t. "async" itxs (or vice | |
2911 | * versa) is *not* preserved, even for itxs that correspond | |
2912 | * to the same object. | |
2913 | * | |
2914 | * For more details, see: zil_itx_assign(), zil_async_to_sync(), | |
2915 | * zil_get_commit_list(), and zil_process_commit_list(). | |
2916 | * | |
2917 | * 3. The lwbs represent a linked list of blocks on disk. Thus, any | |
2918 | * lwb cannot be considered committed to stable storage, until its | |
2919 | * "previous" lwb is also committed to stable storage. This fact, | |
2920 | * coupled with the fact described above, means that itxs are | |
2921 | * committed in (roughly) the order in which they were generated. | |
2922 | * This is essential because itxs are dependent on prior itxs. | |
2923 | * Thus, we *must not* deem an itx as being committed to stable | |
2924 | * storage, until *all* prior itxs have also been committed to | |
2925 | * stable storage. | |
2926 | * | |
2927 | * To enforce this ordering of lwb zio's, while still leveraging as | |
2928 | * much of the underlying storage performance as possible, we rely | |
2929 | * on two fundamental concepts: | |
2930 | * | |
2931 | * 1. The creation and issuance of lwb zio's is protected by | |
1b2b0aca | 2932 | * the zilog's "zl_issuer_lock", which ensures only a single |
1ce23dca PS |
2933 | * thread is creating and/or issuing lwb's at a time |
2934 | * 2. The "previous" lwb is a child of the "current" lwb | |
2fe61a7e | 2935 | * (leveraging the zio parent-child dependency graph) |
1ce23dca PS |
2936 | * |
2937 | * By relying on this parent-child zio relationship, we can have | |
2938 | * many lwb zio's concurrently issued to the underlying storage, | |
2939 | * but the order in which they complete will be the same order in | |
2940 | * which they were created. | |
34dc7c2f BB |
2941 | */ |
2942 | void | |
572e2857 | 2943 | zil_commit(zilog_t *zilog, uint64_t foid) |
34dc7c2f | 2944 | { |
1ce23dca PS |
2945 | /* |
2946 | * We should never attempt to call zil_commit on a snapshot for | |
2947 | * a couple of reasons: | |
2948 | * | |
2949 | * 1. A snapshot may never be modified, thus it cannot have any | |
2950 | * in-flight itxs that would have modified the dataset. | |
2951 | * | |
2952 | * 2. By design, when zil_commit() is called, a commit itx will | |
2953 | * be assigned to this zilog; as a result, the zilog will be | |
2954 | * dirtied. We must not dirty the zilog of a snapshot; there's | |
2955 | * checks in the code that enforce this invariant, and will | |
2956 | * cause a panic if it's not upheld. | |
2957 | */ | |
2958 | ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); | |
34dc7c2f | 2959 | |
572e2857 BB |
2960 | if (zilog->zl_sync == ZFS_SYNC_DISABLED) |
2961 | return; | |
34dc7c2f | 2962 | |
1ce23dca PS |
2963 | if (!spa_writeable(zilog->zl_spa)) { |
2964 | /* | |
2965 | * If the SPA is not writable, there should never be any | |
2966 | * pending itxs waiting to be committed to disk. If that | |
2967 | * weren't true, we'd skip writing those itxs out, and | |
2fe61a7e | 2968 | * would break the semantics of zil_commit(); thus, we're |
1ce23dca PS |
2969 | * verifying that truth before we return to the caller. |
2970 | */ | |
2971 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); | |
2972 | ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); | |
2973 | for (int i = 0; i < TXG_SIZE; i++) | |
2974 | ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); | |
2975 | return; | |
2976 | } | |
2977 | ||
2978 | /* | |
2979 | * If the ZIL is suspended, we don't want to dirty it by calling | |
2980 | * zil_commit_itx_assign() below, nor can we write out | |
2981 | * lwbs as would be done in zil_commit_writer(). Thus, we | 
2982 | * simply rely on txg_wait_synced() to maintain the necessary | |
2983 | * semantics, and avoid calling those functions altogether. | |
2984 | */ | |
2985 | if (zilog->zl_suspend > 0) { | |
2986 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
2987 | return; | |
2988 | } | |
2989 | ||
2fe61a7e PS |
2990 | zil_commit_impl(zilog, foid); |
2991 | } | |
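/*
 * Editorial sketch (not part of the original source): the "foid"
 * semantics described in the block comment above, in practice. A
 * caller that only needs one object's async itxs on stable storage
 * passes that object's id; a caller that needs everything passes 0
 * (the function and object id below are hypothetical):
 */
#if 0	/* illustrative only */
static void
example_commit_usage(zilog_t *my_zilog)
{
	/* fsync-style: all "sync" itxs, plus "async" itxs for object 42 */
	zil_commit(my_zilog, 42);

	/* commit everything, e.g. on unmount-style paths */
	zil_commit(my_zilog, 0);
}
#endif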
2992 | ||
2993 | void | |
2994 | zil_commit_impl(zilog_t *zilog, uint64_t foid) | |
2995 | { | |
b6ad9671 ED |
2996 | ZIL_STAT_BUMP(zil_commit_count); |
2997 | ||
1ce23dca PS |
2998 | /* |
2999 | * Move the "async" itxs for the specified foid to the "sync" | |
3000 | * queues, such that they will be later committed (or skipped) | |
3001 | * to an lwb when zil_process_commit_list() is called. | |
3002 | * | |
3003 | * Since these "async" itxs must be committed prior to this | |
3004 | * call to zil_commit returning, we must perform this operation | |
3005 | * before we call zil_commit_itx_assign(). | |
3006 | */ | |
572e2857 | 3007 | zil_async_to_sync(zilog, foid); |
34dc7c2f | 3008 | |
1ce23dca PS |
3009 | /* |
3010 | * We allocate a new "waiter" structure which will initially be | |
3011 | * linked to the commit itx using the itx's "itx_private" field. | |
3012 | * Since the commit itx doesn't represent any on-disk state, | |
3013 | * when it's committed to an lwb, rather than copying the its | |
3014 | * lr_t into the lwb's buffer, the commit itx's "waiter" will be | |
3015 | * added to the lwb's list of waiters. Then, when the lwb is | |
3016 | * committed to stable storage, each waiter in the lwb's list of | |
3017 | * waiters will be marked "done" and signaled. | 
3018 | * | |
3019 | * We must create the waiter and assign the commit itx prior to | |
3020 | * calling zil_commit_writer(), or else our specific commit itx | |
3021 | * is not guaranteed to be committed to an lwb prior to calling | |
3022 | * zil_commit_waiter(). | |
3023 | */ | |
3024 | zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); | |
3025 | zil_commit_itx_assign(zilog, zcw); | |
428870ff | 3026 | |
1ce23dca PS |
3027 | zil_commit_writer(zilog, zcw); |
3028 | zil_commit_waiter(zilog, zcw); | |
428870ff | 3029 | |
1ce23dca PS |
3030 | if (zcw->zcw_zio_error != 0) { |
3031 | /* | |
3032 | * If there was an error writing out the ZIL blocks that | |
3033 | * this thread is waiting on, then we fall back to | 
3034 | * relying on spa_sync() to write out the data this | |
3035 | * thread is waiting on. Obviously this has performance | |
3036 | * implications, but the expectation is for this to be | |
3037 | * an exceptional case that shouldn't occur often. | 
3038 | */ | |
3039 | DTRACE_PROBE2(zil__commit__io__error, | |
3040 | zilog_t *, zilog, zil_commit_waiter_t *, zcw); | |
3041 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
3042 | } | |
8c0712fd | 3043 | |
1ce23dca | 3044 | zil_free_commit_waiter(zcw); |
428870ff BB |
3045 | } |
3046 | ||
34dc7c2f BB |
3047 | /* |
3048 | * Called in syncing context to free committed log blocks and update log header. | |
3049 | */ | |
3050 | void | |
3051 | zil_sync(zilog_t *zilog, dmu_tx_t *tx) | |
3052 | { | |
3053 | zil_header_t *zh = zil_header_in_syncing_context(zilog); | |
3054 | uint64_t txg = dmu_tx_get_txg(tx); | |
3055 | spa_t *spa = zilog->zl_spa; | |
428870ff | 3056 | uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; |
34dc7c2f BB |
3057 | lwb_t *lwb; |
3058 | ||
9babb374 BB |
3059 | /* |
3060 | * We don't zero out zl_destroy_txg, so make sure we don't try | |
3061 | * to destroy it twice. | |
3062 | */ | |
3063 | if (spa_sync_pass(spa) != 1) | |
3064 | return; | |
3065 | ||
34dc7c2f BB |
3066 | mutex_enter(&zilog->zl_lock); |
3067 | ||
3068 | ASSERT(zilog->zl_stop_sync == 0); | |
3069 | ||
428870ff BB |
3070 | if (*replayed_seq != 0) { |
3071 | ASSERT(zh->zh_replay_seq < *replayed_seq); | |
3072 | zh->zh_replay_seq = *replayed_seq; | |
3073 | *replayed_seq = 0; | |
3074 | } | |
34dc7c2f BB |
3075 | |
3076 | if (zilog->zl_destroy_txg == txg) { | |
3077 | blkptr_t blk = zh->zh_log; | |
3078 | ||
3079 | ASSERT(list_head(&zilog->zl_lwb_list) == NULL); | |
34dc7c2f BB |
3080 | |
3081 | bzero(zh, sizeof (zil_header_t)); | |
fb5f0bc8 | 3082 | bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq)); |
34dc7c2f BB |
3083 | |
3084 | if (zilog->zl_keep_first) { | |
3085 | /* | |
3086 | * If this block was part of log chain that couldn't | |
3087 | * be claimed because a device was missing during | |
3088 | * zil_claim(), but that device later returns, | |
3089 | * then this block could erroneously appear valid. | |
3090 | * To guard against this, assign a new GUID to the new | |
3091 | * log chain so it doesn't matter what blk points to. | |
3092 | */ | |
3093 | zil_init_log_chain(zilog, &blk); | |
3094 | zh->zh_log = blk; | |
3095 | } | |
3096 | } | |
3097 | ||
9babb374 | 3098 | while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { |
34dc7c2f BB |
3099 | zh->zh_log = lwb->lwb_blk; |
3100 | if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg) | |
3101 | break; | |
3102 | list_remove(&zilog->zl_lwb_list, lwb); | |
1ce23dca PS |
3103 | zio_free(spa, txg, &lwb->lwb_blk); |
3104 | zil_free_lwb(zilog, lwb); | |
34dc7c2f BB |
3105 | |
3106 | /* | |
3107 | * If we don't have anything left in the lwb list then | |
3108 | * we've had an allocation failure and we need to zero | |
3109 | * out the zil_header blkptr so that we don't end | |
3110 | * up freeing the same block twice. | |
3111 | */ | |
3112 | if (list_head(&zilog->zl_lwb_list) == NULL) | |
3113 | BP_ZERO(&zh->zh_log); | |
3114 | } | |
920dd524 ED |
3115 | |
3116 | /* | |
3117 | * Remove fastwrite on any blocks that have been pre-allocated for | |
3118 | * the next commit. This prevents fastwrite counter pollution by | |
3119 | * unused, long-lived LWBs. | |
3120 | */ | |
3121 | for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) { | |
1ce23dca | 3122 | if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) { |
920dd524 ED |
3123 | metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); |
3124 | lwb->lwb_fastwrite = 0; | |
3125 | } | |
3126 | } | |
3127 | ||
34dc7c2f BB |
3128 | mutex_exit(&zilog->zl_lock); |
3129 | } | |
3130 | ||
1ce23dca PS |
3131 | static int |
3132 | zil_lwb_cons(void *vbuf, void *unused, int kmflag) | |
3133 | { | |
14e4e3cb | 3134 | (void) unused, (void) kmflag; |
1ce23dca PS |
3135 | lwb_t *lwb = vbuf; |
3136 | list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); | |
3137 | list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), | |
3138 | offsetof(zil_commit_waiter_t, zcw_node)); | |
3139 | avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, | |
3140 | sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); | |
3141 | mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); | |
3142 | return (0); | |
3143 | } | |
3144 | ||
1ce23dca PS |
3145 | static void |
3146 | zil_lwb_dest(void *vbuf, void *unused) | |
3147 | { | |
14e4e3cb | 3148 | (void) unused; |
1ce23dca PS |
3149 | lwb_t *lwb = vbuf; |
3150 | mutex_destroy(&lwb->lwb_vdev_lock); | |
3151 | avl_destroy(&lwb->lwb_vdev_tree); | |
3152 | list_destroy(&lwb->lwb_waiters); | |
3153 | list_destroy(&lwb->lwb_itxs); | |
3154 | } | |
3155 | ||
34dc7c2f BB |
3156 | void |
3157 | zil_init(void) | |
3158 | { | |
3159 | zil_lwb_cache = kmem_cache_create("zil_lwb_cache", | |
1ce23dca PS |
3160 | sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); |
3161 | ||
3162 | zil_zcw_cache = kmem_cache_create("zil_zcw_cache", | |
3163 | sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); | |
b6ad9671 ED |
3164 | |
3165 | zil_ksp = kstat_create("zfs", 0, "zil", "misc", | |
d1d7e268 | 3166 | KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), |
b6ad9671 ED |
3167 | KSTAT_FLAG_VIRTUAL); |
3168 | ||
3169 | if (zil_ksp != NULL) { | |
3170 | zil_ksp->ks_data = &zil_stats; | |
3171 | kstat_install(zil_ksp); | |
3172 | } | |
34dc7c2f BB |
3173 | } |
3174 | ||
3175 | void | |
3176 | zil_fini(void) | |
3177 | { | |
1ce23dca | 3178 | kmem_cache_destroy(zil_zcw_cache); |
34dc7c2f | 3179 | kmem_cache_destroy(zil_lwb_cache); |
b6ad9671 ED |
3180 | |
3181 | if (zil_ksp != NULL) { | |
3182 | kstat_delete(zil_ksp); | |
3183 | zil_ksp = NULL; | |
3184 | } | |
34dc7c2f BB |
3185 | } |
3186 | ||
428870ff BB |
3187 | void |
3188 | zil_set_sync(zilog_t *zilog, uint64_t sync) | |
3189 | { | |
3190 | zilog->zl_sync = sync; | |
3191 | } | |
3192 | ||
3193 | void | |
3194 | zil_set_logbias(zilog_t *zilog, uint64_t logbias) | |
3195 | { | |
3196 | zilog->zl_logbias = logbias; | |
3197 | } | |
3198 | ||
34dc7c2f BB |
3199 | zilog_t * |
3200 | zil_alloc(objset_t *os, zil_header_t *zh_phys) | |
3201 | { | |
3202 | zilog_t *zilog; | |
3203 | ||
79c76d5b | 3204 | zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); |
34dc7c2f BB |
3205 | |
3206 | zilog->zl_header = zh_phys; | |
3207 | zilog->zl_os = os; | |
3208 | zilog->zl_spa = dmu_objset_spa(os); | |
3209 | zilog->zl_dmu_pool = dmu_objset_pool(os); | |
3210 | zilog->zl_destroy_txg = TXG_INITIAL - 1; | |
428870ff BB |
3211 | zilog->zl_logbias = dmu_objset_logbias(os); |
3212 | zilog->zl_sync = dmu_objset_syncprop(os); | |
1ce23dca PS |
3213 | zilog->zl_dirty_max_txg = 0; |
3214 | zilog->zl_last_lwb_opened = NULL; | |
3215 | zilog->zl_last_lwb_latency = 0; | |
b8738257 | 3216 | zilog->zl_max_block_size = zil_maxblocksize; |
34dc7c2f BB |
3217 | |
3218 | mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); | |
1b2b0aca | 3219 | mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f | 3220 | |
1c27024e | 3221 | for (int i = 0; i < TXG_SIZE; i++) { |
572e2857 BB |
3222 | mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, |
3223 | MUTEX_DEFAULT, NULL); | |
3224 | } | |
34dc7c2f BB |
3225 | |
3226 | list_create(&zilog->zl_lwb_list, sizeof (lwb_t), | |
3227 | offsetof(lwb_t, lwb_node)); | |
3228 | ||
572e2857 BB |
3229 | list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), |
3230 | offsetof(itx_t, itx_node)); | |
3231 | ||
34dc7c2f BB |
3232 | cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); |
3233 | ||
3234 | return (zilog); | |
3235 | } | |
3236 | ||
3237 | void | |
3238 | zil_free(zilog_t *zilog) | |
3239 | { | |
d6320ddb | 3240 | int i; |
34dc7c2f BB |
3241 | |
3242 | zilog->zl_stop_sync = 1; | |
3243 | ||
13fe0198 MA |
3244 | ASSERT0(zilog->zl_suspend); |
3245 | ASSERT0(zilog->zl_suspending); | |
3246 | ||
3e31d2b0 | 3247 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
34dc7c2f BB |
3248 | list_destroy(&zilog->zl_lwb_list); |
3249 | ||
572e2857 BB |
3250 | ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); |
3251 | list_destroy(&zilog->zl_itx_commit_list); | |
3252 | ||
d6320ddb | 3253 | for (i = 0; i < TXG_SIZE; i++) { |
572e2857 BB |
3254 | /* |
3255 | * It's possible for an itx to be generated that doesn't dirty | |
3256 | * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() | |
3257 | * callback to remove the entry. We remove those here. | |
3258 | * | |
3259 | * Also free up the ziltest itxs. | |
3260 | */ | |
3261 | if (zilog->zl_itxg[i].itxg_itxs) | |
3262 | zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); | |
3263 | mutex_destroy(&zilog->zl_itxg[i].itxg_lock); | |
3264 | } | |
3265 | ||
1b2b0aca | 3266 | mutex_destroy(&zilog->zl_issuer_lock); |
34dc7c2f BB |
3267 | mutex_destroy(&zilog->zl_lock); |
3268 | ||
34dc7c2f BB |
3269 | cv_destroy(&zilog->zl_cv_suspend); |
3270 | ||
3271 | kmem_free(zilog, sizeof (zilog_t)); | |
3272 | } | |
3273 | ||
34dc7c2f BB |
3274 | /* |
3275 | * Open an intent log. | |
3276 | */ | |
3277 | zilog_t * | |
3278 | zil_open(objset_t *os, zil_get_data_t *get_data) | |
3279 | { | |
3280 | zilog_t *zilog = dmu_objset_zil(os); | |
3281 | ||
1ce23dca PS |
3282 | ASSERT3P(zilog->zl_get_data, ==, NULL); |
3283 | ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); | |
3e31d2b0 ES |
3284 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
3285 | ||
34dc7c2f | 3286 | zilog->zl_get_data = get_data; |
34dc7c2f BB |
3287 | |
3288 | return (zilog); | |
3289 | } | |
3290 | ||
3291 | /* | |
3292 | * Close an intent log. | |
3293 | */ | |
3294 | void | |
3295 | zil_close(zilog_t *zilog) | |
3296 | { | |
3e31d2b0 | 3297 | lwb_t *lwb; |
1ce23dca | 3298 | uint64_t txg; |
572e2857 | 3299 | |
1ce23dca PS |
3300 | if (!dmu_objset_is_snapshot(zilog->zl_os)) { |
3301 | zil_commit(zilog, 0); | |
3302 | } else { | |
3303 | ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL); | |
3304 | ASSERT0(zilog->zl_dirty_max_txg); | |
3305 | ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); | |
3306 | } | |
572e2857 | 3307 | |
572e2857 | 3308 | mutex_enter(&zilog->zl_lock); |
3e31d2b0 | 3309 | lwb = list_tail(&zilog->zl_lwb_list); |
1ce23dca PS |
3310 | if (lwb == NULL) |
3311 | txg = zilog->zl_dirty_max_txg; | |
3312 | else | |
3313 | txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); | |
572e2857 | 3314 | mutex_exit(&zilog->zl_lock); |
1ce23dca PS |
3315 | |
3316 | /* | |
3317 | * We need to use txg_wait_synced() to wait long enough for the | |
3318 | * ZIL to be clean, and to wait for all pending lwbs to be | |
3319 | * written out. | |
3320 | */ | |
3321 | if (txg != 0) | |
34dc7c2f | 3322 | txg_wait_synced(zilog->zl_dmu_pool, txg); |
55922e73 GW |
3323 | |
3324 | if (zilog_is_dirty(zilog)) | |
8e739b2c RE |
3325 | zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog, |
3326 | (u_longlong_t)txg); | |
50c957f7 | 3327 | if (txg < spa_freeze_txg(zilog->zl_spa)) |
55922e73 | 3328 | VERIFY(!zilog_is_dirty(zilog)); |
34dc7c2f | 3329 | |
34dc7c2f | 3330 | zilog->zl_get_data = NULL; |
3e31d2b0 ES |
3331 | |
3332 | /* | |
1ce23dca | 3333 | * We should have only one lwb left on the list; remove it now. |
3e31d2b0 ES |
3334 | */ |
3335 | mutex_enter(&zilog->zl_lock); | |
3336 | lwb = list_head(&zilog->zl_lwb_list); | |
3337 | if (lwb != NULL) { | |
1ce23dca PS |
3338 | ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list)); |
3339 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); | |
3340 | ||
920dd524 ED |
3341 | if (lwb->lwb_fastwrite) |
3342 | metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk); | |
1ce23dca | 3343 | |
3e31d2b0 ES |
3344 | list_remove(&zilog->zl_lwb_list, lwb); |
3345 | zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); | |
1ce23dca | 3346 | zil_free_lwb(zilog, lwb); |
3e31d2b0 ES |
3347 | } |
3348 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f BB |
3349 | } |
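/*
 * Editorial sketch (not part of the original source): the
 * open/commit/close lifecycle from a consumer's point of view
 * ("my_get_data" is a hypothetical zil_get_data_t callback):
 */
#if 0	/* illustrative only */
static void
example_zil_lifecycle(objset_t *os)
{
	zilog_t *zl = zil_open(os, my_get_data);

	/* ... generate itxs via zil_itx_create()/zil_itx_assign() ... */

	zil_commit(zl, 0);	/* everything logged so far is now stable */
	zil_close(zl);		/* waits until the ZIL is clean */
}
#endif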
3350 | ||
13fe0198 MA |
3351 | static char *suspend_tag = "zil suspending"; |
3352 | ||
34dc7c2f BB |
3353 | /* |
3354 | * Suspend an intent log. While in suspended mode, we still honor | |
3355 | * synchronous semantics, but we rely on txg_wait_synced() to do it. | |
13fe0198 MA |
3356 | * On old version pools, we suspend the log briefly when taking a |
3357 | * snapshot so that it will have an empty intent log. | |
3358 | * | |
3359 | * Long holds are not really intended to be used the way we do here -- | |
3360 | * held for such a short time. A concurrent caller of dsl_dataset_long_held() | |
3361 | * could fail. Therefore we take pains to only put a long hold if it is | |
3362 | * actually necessary. Fortunately, it will only be necessary if the | |
3363 | * objset is currently mounted (or the ZVOL equivalent). In that case it | |
3364 | * will already have a long hold, so we are not really making things any worse. | |
3365 | * | |
3366 | * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or | |
3367 | * zvol_state_t), and use their mechanism to prevent their hold from being | |
3368 | * dropped (e.g. VFS_HOLD()). However, that would be even more pain for | |
3369 | * very little gain. | |
3370 | * | |
3371 | * if cookiep == NULL, this does both the suspend & resume. | |
3372 | * Otherwise, it returns with the dataset "long held", and the cookie | |
3373 | * should be passed into zil_resume(). | |
34dc7c2f BB |
3374 | */ |
3375 | int | |
13fe0198 | 3376 | zil_suspend(const char *osname, void **cookiep) |
34dc7c2f | 3377 | { |
13fe0198 MA |
3378 | objset_t *os; |
3379 | zilog_t *zilog; | |
3380 | const zil_header_t *zh; | |
3381 | int error; | |
3382 | ||
3383 | error = dmu_objset_hold(osname, suspend_tag, &os); | |
3384 | if (error != 0) | |
3385 | return (error); | |
3386 | zilog = dmu_objset_zil(os); | |
34dc7c2f BB |
3387 | |
3388 | mutex_enter(&zilog->zl_lock); | |
13fe0198 MA |
3389 | zh = zilog->zl_header; |
3390 | ||
9babb374 | 3391 | if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ |
34dc7c2f | 3392 | mutex_exit(&zilog->zl_lock); |
13fe0198 | 3393 | dmu_objset_rele(os, suspend_tag); |
2e528b49 | 3394 | return (SET_ERROR(EBUSY)); |
34dc7c2f | 3395 | } |
13fe0198 MA |
3396 | |
3397 | /* | |
3398 | * Don't put a long hold in the cases where we can avoid it. This | |
3399 | * is when there is no cookie so we are doing a suspend & resume | |
3400 | * (i.e. called from zil_vdev_offline()), and there's nothing to do | |
3401 | * for the suspend because it's already suspended, or there's no ZIL. | |
3402 | */ | |
3403 | if (cookiep == NULL && !zilog->zl_suspending && | |
3404 | (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { | |
3405 | mutex_exit(&zilog->zl_lock); | |
3406 | dmu_objset_rele(os, suspend_tag); | |
3407 | return (0); | |
3408 | } | |
3409 | ||
3410 | dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); | |
3411 | dsl_pool_rele(dmu_objset_pool(os), suspend_tag); | |
3412 | ||
3413 | zilog->zl_suspend++; | |
3414 | ||
3415 | if (zilog->zl_suspend > 1) { | |
34dc7c2f | 3416 | /* |
13fe0198 | 3417 | * Someone else is already suspending it. |
34dc7c2f BB |
3418 | * Just wait for them to finish. |
3419 | */ | |
13fe0198 | 3420 | |
34dc7c2f BB |
3421 | while (zilog->zl_suspending) |
3422 | cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); | |
34dc7c2f | 3423 | mutex_exit(&zilog->zl_lock); |
13fe0198 MA |
3424 | |
3425 | if (cookiep == NULL) | |
3426 | zil_resume(os); | |
3427 | else | |
3428 | *cookiep = os; | |
3429 | return (0); | |
3430 | } | |
3431 | ||
3432 | /* | |
3433 | * If there is no pointer to an on-disk block, this ZIL must not | |
3434 | * be active (e.g. filesystem not mounted), so there's nothing | |
3435 | * to clean up. | |
3436 | */ | |
3437 | if (BP_IS_HOLE(&zh->zh_log)) { | |
3438 | ASSERT(cookiep != NULL); /* fast path already handled */ | |
3439 | ||
3440 | *cookiep = os; | |
3441 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f BB |
3442 | return (0); |
3443 | } | |
13fe0198 | 3444 | |
4807c0ba TC |
3445 | /* |
3446 | * The ZIL has work to do. Ensure that the associated encryption | |
3447 | * key will remain mapped while we are committing the log by | |
3448 | * grabbing a reference to it. If the key isn't loaded we have no | |
3449 | * choice but to return an error until the wrapping key is loaded. | |
3450 | */ | |
52ce99dd TC |
3451 | if (os->os_encrypted && |
3452 | dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) { | |
4807c0ba TC |
3453 | zilog->zl_suspend--; |
3454 | mutex_exit(&zilog->zl_lock); | |
3455 | dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); | |
3456 | dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); | |
2ffd89fc | 3457 | return (SET_ERROR(EACCES)); |
4807c0ba TC |
3458 | } |
3459 | ||
34dc7c2f BB |
3460 | zilog->zl_suspending = B_TRUE; |
3461 | mutex_exit(&zilog->zl_lock); | |
3462 | ||
2fe61a7e PS |
3463 | /* |
3464 | * We need to use zil_commit_impl to ensure we wait for all | |
3465 | * LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed | |
3466 | * to disk before proceeding. If we used zil_commit instead, it | |
3467 | * would just call txg_wait_synced(), because zl_suspend is set. | |
3468 | * txg_wait_synced() doesn't wait for these lwb's to be | |
900d09b2 | 3469 | * LWB_STATE_FLUSH_DONE before returning. |
2fe61a7e PS |
3470 | */ |
3471 | zil_commit_impl(zilog, 0); | |
3472 | ||
3473 | /* | |
900d09b2 PS |
3474 | * Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we |
3475 | * use txg_wait_synced() to ensure the data from the zilog has | |
3476 | * migrated to the main pool before calling zil_destroy(). | |
2fe61a7e PS |
3477 | */ |
3478 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
34dc7c2f BB |
3479 | |
3480 | zil_destroy(zilog, B_FALSE); | |
3481 | ||
3482 | mutex_enter(&zilog->zl_lock); | |
3483 | zilog->zl_suspending = B_FALSE; | |
3484 | cv_broadcast(&zilog->zl_cv_suspend); | |
3485 | mutex_exit(&zilog->zl_lock); | |
3486 | ||
52ce99dd TC |
3487 | if (os->os_encrypted) |
3488 | dsl_dataset_remove_key_mapping(dmu_objset_ds(os)); | |
4807c0ba | 3489 | |
13fe0198 MA |
3490 | if (cookiep == NULL) |
3491 | zil_resume(os); | |
3492 | else | |
3493 | *cookiep = os; | |
34dc7c2f BB |
3494 | return (0); |
3495 | } | |
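/*
 * Editorial sketch (not part of the original source): the two
 * calling conventions described in the block comment above
 * ("osname" is a hypothetical dataset name):
 */
#if 0	/* illustrative only */
static void
example_suspend_resume(const char *osname)
{
	void *cookie;

	/* one-shot: suspend & resume together (no cookie) */
	(void) zil_suspend(osname, NULL);

	/* or: suspend, keep the dataset long-held, then resume */
	if (zil_suspend(osname, &cookie) == 0) {
		/* ... the ZIL stays empty and suspended here ... */
		zil_resume(cookie);
	}
}
#endif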
3496 | ||
3497 | void | |
13fe0198 | 3498 | zil_resume(void *cookie) |
34dc7c2f | 3499 | { |
13fe0198 MA |
3500 | objset_t *os = cookie; |
3501 | zilog_t *zilog = dmu_objset_zil(os); | |
3502 | ||
34dc7c2f BB |
3503 | mutex_enter(&zilog->zl_lock); |
3504 | ASSERT(zilog->zl_suspend != 0); | |
3505 | zilog->zl_suspend--; | |
3506 | mutex_exit(&zilog->zl_lock); | |
13fe0198 MA |
3507 | dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); |
3508 | dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); | |
34dc7c2f BB |
3509 | } |
3510 | ||
3511 | typedef struct zil_replay_arg { | |
18168da7 | 3512 | zil_replay_func_t *const *zr_replay; |
34dc7c2f | 3513 | void *zr_arg; |
34dc7c2f | 3514 | boolean_t zr_byteswap; |
428870ff | 3515 | char *zr_lr; |
34dc7c2f BB |
3516 | } zil_replay_arg_t; |
3517 | ||
428870ff | 3518 | static int |
61868bb1 | 3519 | zil_replay_error(zilog_t *zilog, const lr_t *lr, int error) |
428870ff | 3520 | { |
eca7b760 | 3521 | char name[ZFS_MAX_DATASET_NAME_LEN]; |
428870ff BB |
3522 | |
3523 | zilog->zl_replaying_seq--; /* didn't actually replay this one */ | |
3524 | ||
3525 | dmu_objset_name(zilog->zl_os, name); | |
3526 | ||
3527 | cmn_err(CE_WARN, "ZFS replay transaction error %d, " | |
3528 | "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, | |
3529 | (u_longlong_t)lr->lrc_seq, | |
3530 | (u_longlong_t)(lr->lrc_txtype & ~TX_CI), | |
3531 | (lr->lrc_txtype & TX_CI) ? "CI" : ""); | |
3532 | ||
3533 | return (error); | |
3534 | } | |
3535 | ||
3536 | static int | |
61868bb1 CS |
3537 | zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra, |
3538 | uint64_t claim_txg) | |
34dc7c2f BB |
3539 | { |
3540 | zil_replay_arg_t *zr = zra; | |
3541 | const zil_header_t *zh = zilog->zl_header; | |
3542 | uint64_t reclen = lr->lrc_reclen; | |
3543 | uint64_t txtype = lr->lrc_txtype; | |
428870ff | 3544 | int error = 0; |
34dc7c2f | 3545 | |
428870ff | 3546 | zilog->zl_replaying_seq = lr->lrc_seq; |
34dc7c2f BB |
3547 | |
3548 | if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ | |
428870ff BB |
3549 | return (0); |
3550 | ||
3551 | if (lr->lrc_txg < claim_txg) /* already committed */ | |
3552 | return (0); | |
34dc7c2f BB |
3553 | |
3554 | /* Strip case-insensitive bit, still present in log record */ | |
3555 | txtype &= ~TX_CI; | |
3556 | ||
428870ff BB |
3557 | if (txtype == 0 || txtype >= TX_MAX_TYPE) |
3558 | return (zil_replay_error(zilog, lr, EINVAL)); | |
3559 | ||
3560 | /* | |
3561 | * If this record type can be logged out of order, the object | |
3562 | * (lr_foid) may no longer exist. That's legitimate, not an error. | |
3563 | */ | |
3564 | if (TX_OOO(txtype)) { | |
3565 | error = dmu_object_info(zilog->zl_os, | |
50c957f7 | 3566 | LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL); |
428870ff BB |
3567 | if (error == ENOENT || error == EEXIST) |
3568 | return (0); | |
fb5f0bc8 BB |
3569 | } |
3570 | ||
34dc7c2f BB |
3571 | /* |
3572 | * Make a copy of the data so we can revise and extend it. | |
3573 | */ | |
428870ff BB |
3574 | bcopy(lr, zr->zr_lr, reclen); |
3575 | ||
3576 | /* | |
3577 | * If this is a TX_WRITE with a blkptr, suck in the data. | |
3578 | */ | |
3579 | if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { | |
3580 | error = zil_read_log_data(zilog, (lr_write_t *)lr, | |
3581 | zr->zr_lr + reclen); | |
13fe0198 | 3582 | if (error != 0) |
428870ff BB |
3583 | return (zil_replay_error(zilog, lr, error)); |
3584 | } | |
34dc7c2f BB |
3585 | |
3586 | /* | |
3587 | * The log block containing this lr may have been byteswapped | |
3588 | * so that we can easily examine common fields like lrc_txtype. | |
428870ff | 3589 | * However, the log is a mix of different record types, and only the |
34dc7c2f BB |
3590 | * replay vectors know how to byteswap their records. Therefore, if |
3591 | * the lr was byteswapped, undo it before invoking the replay vector. | |
3592 | */ | |
3593 | if (zr->zr_byteswap) | |
428870ff | 3594 | byteswap_uint64_array(zr->zr_lr, reclen); |
34dc7c2f BB |
3595 | |
3596 | /* | |
3597 | * We must now do two things atomically: replay this log record, | |
fb5f0bc8 BB |
3598 | * and update the log header sequence number to reflect the fact that |
3599 | * we did so. At the end of each replay function the sequence number | |
3600 | * is updated if we are in replay mode. | |
34dc7c2f | 3601 | */ |
428870ff | 3602 | error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); |
13fe0198 | 3603 | if (error != 0) { |
34dc7c2f BB |
3604 | /* |
3605 | * The DMU's dnode layer doesn't see removes until the txg | |
3606 | * commits, so a subsequent claim can spuriously fail with | |
fb5f0bc8 | 3607 | * EEXIST. So if we receive any error we try syncing out |
428870ff BB |
3608 | * any removes then retry the transaction. Note that we |
3609 | * specify B_FALSE for byteswap now, so we don't do it twice. | |
34dc7c2f | 3610 | */ |
428870ff BB |
3611 | txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); |
3612 | error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); | |
13fe0198 | 3613 | if (error != 0) |
428870ff | 3614 | return (zil_replay_error(zilog, lr, error)); |
34dc7c2f | 3615 | } |
428870ff | 3616 | return (0); |
34dc7c2f BB |
3617 | } |
3618 | ||
428870ff | 3619 | static int |
61868bb1 | 3620 | zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) |
34dc7c2f | 3621 | { |
14e4e3cb AZ |
3622 | (void) bp, (void) arg, (void) claim_txg; |
3623 | ||
34dc7c2f | 3624 | zilog->zl_replay_blks++; |
428870ff BB |
3625 | |
3626 | return (0); | |
34dc7c2f BB |
3627 | } |
3628 | ||
3629 | /* | |
3630 | * If this dataset has a non-empty intent log, replay it and destroy it. | |
3631 | */ | |
3632 | void | |
18168da7 AZ |
3633 | zil_replay(objset_t *os, void *arg, |
3634 | zil_replay_func_t *const replay_func[TX_MAX_TYPE]) | |
34dc7c2f BB |
3635 | { |
3636 | zilog_t *zilog = dmu_objset_zil(os); | |
3637 | const zil_header_t *zh = zilog->zl_header; | |
3638 | zil_replay_arg_t zr; | |
3639 | ||
9babb374 | 3640 | if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { |
34dc7c2f BB |
3641 | zil_destroy(zilog, B_TRUE); |
3642 | return; | |
3643 | } | |
3644 | ||
34dc7c2f BB |
3645 | zr.zr_replay = replay_func; |
3646 | zr.zr_arg = arg; | |
34dc7c2f | 3647 | zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); |
79c76d5b | 3648 | zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); |
34dc7c2f BB |
3649 | |
3650 | /* | |
3651 | * Wait for in-progress removes to sync before starting replay. | |
3652 | */ | |
3653 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
3654 | ||
fb5f0bc8 | 3655 | zilog->zl_replay = B_TRUE; |
428870ff | 3656 | zilog->zl_replay_time = ddi_get_lbolt(); |
34dc7c2f BB |
3657 | ASSERT(zilog->zl_replay_blks == 0); |
3658 | (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, | |
b5256303 | 3659 | zh->zh_claim_txg, B_TRUE); |
00b46022 | 3660 | vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); |
34dc7c2f BB |
3661 | |
3662 | zil_destroy(zilog, B_FALSE); | |
3663 | txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); | |
fb5f0bc8 | 3664 | zilog->zl_replay = B_FALSE; |
34dc7c2f BB |
3665 | } |
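/*
 * Editorial sketch (not part of the original source): callers supply
 * one replay vector entry per TX_* type (see zfs_replay_vector in
 * zfs_replay.c for the real table). A toy version, with hypothetical
 * handlers matching the zil_replay_func_t call made by
 * zil_replay_log_record() above:
 */
#if 0	/* illustrative only */
static int my_replay_err(void *arg, void *lr, boolean_t byteswap);
static int my_replay_create(void *arg, void *lr, boolean_t byteswap);

static zil_replay_func_t *const my_replay_vector[TX_MAX_TYPE] = {
	my_replay_err,		/* 0: no such transaction type */
	my_replay_create,	/* TX_CREATE */
	/* ... one entry per remaining TX_* type ... */
};

	/* then, when first instantiating the dataset: */
	zil_replay(os, my_arg, my_replay_vector);
#endif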
3666 | ||
428870ff BB |
3667 | boolean_t |
3668 | zil_replaying(zilog_t *zilog, dmu_tx_t *tx) | |
34dc7c2f | 3669 | { |
428870ff BB |
3670 | if (zilog->zl_sync == ZFS_SYNC_DISABLED) |
3671 | return (B_TRUE); | |
34dc7c2f | 3672 | |
428870ff BB |
3673 | if (zilog->zl_replay) { |
3674 | dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); | |
3675 | zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = | |
3676 | zilog->zl_replaying_seq; | |
3677 | return (B_TRUE); | |
34dc7c2f BB |
3678 | } |
3679 | ||
428870ff | 3680 | return (B_FALSE); |
34dc7c2f | 3681 | } |
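/*
 * Editorial sketch (not part of the original source): zil_replaying()
 * is the guard the zfs_log_* helpers use before building an itx --
 * during replay (or with sync=disabled) the operation must not be
 * re-logged, and calling it also records the replayed sequence number
 * as a side effect. A minimal, hypothetical logging helper:
 */
#if 0	/* illustrative only */
static void
example_log_setattr(zilog_t *zilog, dmu_tx_t *tx)
{
	itx_t *itx;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_SETATTR, sizeof (lr_setattr_t));
	/* ... fill in the lr_setattr_t body from the operation ... */
	zil_itx_assign(zilog, itx, tx);
}
#endif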
9babb374 | 3682 | |
9babb374 | 3683 | int |
a1d477c2 | 3684 | zil_reset(const char *osname, void *arg) |
9babb374 | 3685 | { |
14e4e3cb | 3686 | (void) arg; |
9babb374 | 3687 | |
14e4e3cb | 3688 | int error = zil_suspend(osname, NULL); |
2ffd89fc PZ |
3689 | /* EACCES means crypto key not loaded */ |
3690 | if ((error == EACCES) || (error == EBUSY)) | |
3691 | return (SET_ERROR(error)); | |
13fe0198 | 3692 | if (error != 0) |
2e528b49 | 3693 | return (SET_ERROR(EEXIST)); |
13fe0198 | 3694 | return (0); |
9babb374 | 3695 | } |
c409e464 | 3696 | |
0f699108 AZ |
3697 | EXPORT_SYMBOL(zil_alloc); |
3698 | EXPORT_SYMBOL(zil_free); | |
3699 | EXPORT_SYMBOL(zil_open); | |
3700 | EXPORT_SYMBOL(zil_close); | |
3701 | EXPORT_SYMBOL(zil_replay); | |
3702 | EXPORT_SYMBOL(zil_replaying); | |
3703 | EXPORT_SYMBOL(zil_destroy); | |
3704 | EXPORT_SYMBOL(zil_destroy_sync); | |
3705 | EXPORT_SYMBOL(zil_itx_create); | |
3706 | EXPORT_SYMBOL(zil_itx_destroy); | |
3707 | EXPORT_SYMBOL(zil_itx_assign); | |
3708 | EXPORT_SYMBOL(zil_commit); | |
0f699108 AZ |
3709 | EXPORT_SYMBOL(zil_claim); |
3710 | EXPORT_SYMBOL(zil_check_log_chain); | |
3711 | EXPORT_SYMBOL(zil_sync); | |
3712 | EXPORT_SYMBOL(zil_clean); | |
3713 | EXPORT_SYMBOL(zil_suspend); | |
3714 | EXPORT_SYMBOL(zil_resume); | |
1ce23dca | 3715 | EXPORT_SYMBOL(zil_lwb_add_block); |
0f699108 AZ |
3716 | EXPORT_SYMBOL(zil_bp_tree_add); |
3717 | EXPORT_SYMBOL(zil_set_sync); | |
3718 | EXPORT_SYMBOL(zil_set_logbias); | |
3719 | ||
1b7c1e5c | 3720 | /* BEGIN CSTYLED */ |
03fdcb9a MM |
3721 | ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW, |
3722 | "ZIL block open timeout percentage"); | |
2fe61a7e | 3723 | |
03fdcb9a MM |
3724 | ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW, |
3725 | "Disable intent logging replay"); | |
c409e464 | 3726 | |
03fdcb9a MM |
3727 | ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW, |
3728 | "Disable ZIL cache flushes"); | |
ee191e80 | 3729 | |
03fdcb9a MM |
3730 | ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW, |
3731 | "Limit in bytes slog sync writes per commit"); | |
b8738257 | 3732 | |
03fdcb9a MM |
3733 | ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW, |
3734 | "Limit in bytes of ZIL log block size"); | |
1b7c1e5c | 3735 | /* END CSTYLED */ |