34dc7c2f
BB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
428870ff 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
5dbd68a3 23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
34dc7c2f
BB
24 */
25
428870ff
BB
26/* Portions Copyright 2010 Robert Milkowski */
27
34dc7c2f
BB
28#include <sys/zfs_context.h>
29#include <sys/spa.h>
30#include <sys/dmu.h>
31#include <sys/zap.h>
32#include <sys/arc.h>
33#include <sys/stat.h>
34#include <sys/resource.h>
35#include <sys/zil.h>
36#include <sys/zil_impl.h>
37#include <sys/dsl_dataset.h>
572e2857 38#include <sys/vdev_impl.h>
34dc7c2f 39#include <sys/dmu_tx.h>
428870ff 40#include <sys/dsl_pool.h>
920dd524 41#include <sys/metaslab.h>
49ee64e5 42#include <sys/trace_zil.h>
34dc7c2f
BB
43
44/*
45 * The zfs intent log (ZIL) saves transaction records of system calls
46 * that change the file system, in memory, with enough information
47 * to be able to replay them. These are stored in memory until
48 * either the DMU transaction group (txg) commits them to the stable pool
49 * and they can be discarded, or they are flushed to the stable log
50 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
51 * requirement. In the event of a panic or power failure, those log
52 * records (transactions) are replayed.
53 *
54 * There is one ZIL per file system. Its on-disk (pool) format consists
55 * of 3 parts:
56 *
57 * - ZIL header
58 * - ZIL blocks
59 * - ZIL records
60 *
61 * A log record holds a system call transaction. Log blocks can
62 * hold many log records and the blocks are chained together.
63 * Each ZIL block contains a block pointer (blkptr_t) to the next
64 * ZIL block in the chain. The ZIL header points to the first
65 * block in the chain. Note there is not a fixed place in the pool
66 * to hold blocks. They are dynamically allocated and freed as
67 * needed from the blocks available. A rough sketch of the chain follows.
68 */
69
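/*
 * A rough picture of the chain described above (illustrative only):
 *
 *	zh_log --> [ zil block ] --> [ zil block ] --> ... --> (hole)
 *	             lr, lr, ..        lr, lr, ..
 *
 * Each block embeds a zil_chain_t (at the head of the block for the
 * ZIO_CHECKSUM_ZILOG2 format, at the tail for the older format) whose
 * zc_next_blk is the blkptr_t of the next block; zil_parse() follows
 * these pointers until it hits a hole or a checksum mismatch.
 */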
b6ad9671
ED
70/*
71 * See zil.h for more information about these fields.
72 */
73zil_stats_t zil_stats = {
d1d7e268
MK
74 { "zil_commit_count", KSTAT_DATA_UINT64 },
75 { "zil_commit_writer_count", KSTAT_DATA_UINT64 },
76 { "zil_itx_count", KSTAT_DATA_UINT64 },
77 { "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
78 { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
79 { "zil_itx_copied_count", KSTAT_DATA_UINT64 },
80 { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
81 { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
82 { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
83 { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
84 { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
85 { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
86 { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
b6ad9671
ED
87};
88
89static kstat_t *zil_ksp;
90
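/*
 * These counters are registered with kstat_create() in zil_init() below;
 * on Linux builds they typically show up read-only under
 * /proc/spl/kstat/zfs/zil.
 */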
34dc7c2f 91/*
d3cc8b15 92 * Disable intent logging replay. This global ZIL switch affects all pools.
34dc7c2f 93 */
d3cc8b15 94int zil_replay_disable = 0;
34dc7c2f
BB
95
96/*
97 * Tunable parameter for debugging or performance analysis. Setting
98 * zfs_nocacheflush will cause corruption on power loss if a volatile
99 * out-of-order write cache is enabled.
100 */
c409e464 101int zfs_nocacheflush = 0;
34dc7c2f
BB
102
103static kmem_cache_t *zil_lwb_cache;
104
572e2857 105static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
428870ff
BB
106
107#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
108 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
109
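/*
 * LWB_EMPTY() above is true only while no log records have been copied
 * into the lwb: for a ZIO_CHECKSUM_ZILOG2 block, lwb_sz is the full block
 * size and lwb_nused starts at sizeof (zil_chain_t); for the older format,
 * lwb_sz already excludes the trailing zil_chain_t and lwb_nused starts at
 * zero. In both cases the two differences match only for an empty buffer.
 */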
110
572e2857
BB
111/*
112 * ziltest is by and large an ugly hack, but very useful in
113 * checking replay without tedious work.
114 * When running ziltest we want to keep all itxs and so maintain
115 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG.
116 * We subtract TXG_CONCURRENT_STATES to allow for common code.
117 */
118#define ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
119
34dc7c2f 120static int
428870ff 121zil_bp_compare(const void *x1, const void *x2)
34dc7c2f 122{
428870ff
BB
123 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
124 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
34dc7c2f
BB
125
126 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
127 return (-1);
128 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
129 return (1);
130
131 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
132 return (-1);
133 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
134 return (1);
135
136 return (0);
137}
138
139static void
428870ff 140zil_bp_tree_init(zilog_t *zilog)
34dc7c2f 141{
428870ff
BB
142 avl_create(&zilog->zl_bp_tree, zil_bp_compare,
143 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
34dc7c2f
BB
144}
145
146static void
428870ff 147zil_bp_tree_fini(zilog_t *zilog)
34dc7c2f 148{
428870ff
BB
149 avl_tree_t *t = &zilog->zl_bp_tree;
150 zil_bp_node_t *zn;
34dc7c2f
BB
151 void *cookie = NULL;
152
153 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
428870ff 154 kmem_free(zn, sizeof (zil_bp_node_t));
34dc7c2f
BB
155
156 avl_destroy(t);
157}
158
428870ff
BB
159int
160zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
34dc7c2f 161{
428870ff 162 avl_tree_t *t = &zilog->zl_bp_tree;
9b67f605 163 const dva_t *dva;
428870ff 164 zil_bp_node_t *zn;
34dc7c2f
BB
165 avl_index_t where;
166
9b67f605
MA
167 if (BP_IS_EMBEDDED(bp))
168 return (0);
169
170 dva = BP_IDENTITY(bp);
171
34dc7c2f 172 if (avl_find(t, dva, &where) != NULL)
2e528b49 173 return (SET_ERROR(EEXIST));
34dc7c2f 174
79c76d5b 175 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
34dc7c2f
BB
176 zn->zn_dva = *dva;
177 avl_insert(t, zn, where);
178
179 return (0);
180}
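/*
 * The bp tree above lets the zil_parse() callbacks skip blocks they have
 * already visited: zil_claim_log_block() and zil_free_log_record() treat
 * a non-zero (EEXIST) return from zil_bp_tree_add() as "already seen".
 */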
181
182static zil_header_t *
183zil_header_in_syncing_context(zilog_t *zilog)
184{
185 return ((zil_header_t *)zilog->zl_header);
186}
187
188static void
189zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
190{
191 zio_cksum_t *zc = &bp->blk_cksum;
192
193 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
194 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
195 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
196 zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
197}
198
199/*
428870ff 200 * Read a log block and make sure it's valid.
34dc7c2f
BB
201 */
202static int
428870ff
BB
203zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
204 char **end)
34dc7c2f 205{
428870ff 206 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
2a432414 207 arc_flags_t aflags = ARC_FLAG_WAIT;
428870ff 208 arc_buf_t *abuf = NULL;
5dbd68a3 209 zbookmark_phys_t zb;
34dc7c2f
BB
210 int error;
211
428870ff
BB
212 if (zilog->zl_header->zh_claim_txg == 0)
213 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
34dc7c2f 214
428870ff
BB
215 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
216 zio_flags |= ZIO_FLAG_SPECULATIVE;
34dc7c2f 217
428870ff
BB
218 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
219 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
220
294f6806 221 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
428870ff 222 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
34dc7c2f
BB
223
224 if (error == 0) {
34dc7c2f
BB
225 zio_cksum_t cksum = bp->blk_cksum;
226
227 /*
b128c09f
BB
228 * Validate the checksummed log block.
229 *
34dc7c2f
BB
230 * Sequence numbers should be... sequential. The checksum
231 * verifier for the next block should be bp's checksum plus 1.
b128c09f
BB
232 *
233 * Also check the log chain linkage and size used.
34dc7c2f
BB
234 */
235 cksum.zc_word[ZIL_ZC_SEQ]++;
236
428870ff
BB
237 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
238 zil_chain_t *zilc = abuf->b_data;
239 char *lr = (char *)(zilc + 1);
240 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
34dc7c2f 241
428870ff
BB
242 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
243 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
2e528b49 244 error = SET_ERROR(ECKSUM);
428870ff 245 } else {
f1512ee6 246 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
428870ff
BB
247 bcopy(lr, dst, len);
248 *end = (char *)dst + len;
249 *nbp = zilc->zc_next_blk;
250 }
251 } else {
252 char *lr = abuf->b_data;
253 uint64_t size = BP_GET_LSIZE(bp);
254 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
255
256 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
257 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
258 (zilc->zc_nused > (size - sizeof (*zilc)))) {
2e528b49 259 error = SET_ERROR(ECKSUM);
428870ff 260 } else {
f1512ee6
MA
261 ASSERT3U(zilc->zc_nused, <=,
262 SPA_OLD_MAXBLOCKSIZE);
428870ff
BB
263 bcopy(lr, dst, zilc->zc_nused);
264 *end = (char *)dst + zilc->zc_nused;
265 *nbp = zilc->zc_next_blk;
266 }
34dc7c2f 267 }
428870ff 268
13fe0198 269 VERIFY(arc_buf_remove_ref(abuf, &abuf));
428870ff
BB
270 }
271
272 return (error);
273}
274
275/*
276 * Read a TX_WRITE log data block.
277 */
278static int
279zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
280{
281 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
282 const blkptr_t *bp = &lr->lr_blkptr;
2a432414 283 arc_flags_t aflags = ARC_FLAG_WAIT;
428870ff 284 arc_buf_t *abuf = NULL;
5dbd68a3 285 zbookmark_phys_t zb;
428870ff
BB
286 int error;
287
288 if (BP_IS_HOLE(bp)) {
289 if (wbuf != NULL)
290 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
291 return (0);
34dc7c2f
BB
292 }
293
428870ff
BB
294 if (zilog->zl_header->zh_claim_txg == 0)
295 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
296
297 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
298 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
299
294f6806 300 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
428870ff
BB
301 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
302
303 if (error == 0) {
304 if (wbuf != NULL)
305 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
306 (void) arc_buf_remove_ref(abuf, &abuf);
307 }
34dc7c2f
BB
308
309 return (error);
310}
311
312/*
313 * Parse the intent log, and call parse_func for each valid record within.
34dc7c2f 314 */
428870ff 315int
34dc7c2f
BB
316zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
317 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
318{
319 const zil_header_t *zh = zilog->zl_header;
428870ff
BB
320 boolean_t claimed = !!zh->zh_claim_txg;
321 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
322 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
323 uint64_t max_blk_seq = 0;
324 uint64_t max_lr_seq = 0;
325 uint64_t blk_count = 0;
326 uint64_t lr_count = 0;
327 blkptr_t blk, next_blk;
34dc7c2f 328 char *lrbuf, *lrp;
428870ff 329 int error = 0;
34dc7c2f 330
d1d7e268 331 bzero(&next_blk, sizeof (blkptr_t));
d4ed6673 332
428870ff
BB
333 /*
334 * Old logs didn't record the maximum zh_claim_lr_seq.
335 */
336 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
337 claim_lr_seq = UINT64_MAX;
34dc7c2f
BB
338
339 /*
340 * Starting at the block pointed to by zh_log we read the log chain.
341 * For each block in the chain we strongly check that block to
342 * ensure its validity. We stop when an invalid block is found.
343 * For each block pointer in the chain we call parse_blk_func().
344 * For each record in each valid block we call parse_lr_func().
345 * If the log has been claimed, stop if we encounter a sequence
346 * number greater than the highest claimed sequence number.
347 */
f1512ee6 348 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
428870ff 349 zil_bp_tree_init(zilog);
34dc7c2f 350
428870ff
BB
351 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
352 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
353 int reclen;
d4ed6673 354 char *end = NULL;
34dc7c2f 355
428870ff
BB
356 if (blk_seq > claim_blk_seq)
357 break;
358 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
359 break;
360 ASSERT3U(max_blk_seq, <, blk_seq);
361 max_blk_seq = blk_seq;
362 blk_count++;
34dc7c2f 363
428870ff
BB
364 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
365 break;
34dc7c2f 366
428870ff 367 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
13fe0198 368 if (error != 0)
34dc7c2f
BB
369 break;
370
428870ff 371 for (lrp = lrbuf; lrp < end; lrp += reclen) {
34dc7c2f
BB
372 lr_t *lr = (lr_t *)lrp;
373 reclen = lr->lrc_reclen;
374 ASSERT3U(reclen, >=, sizeof (lr_t));
428870ff
BB
375 if (lr->lrc_seq > claim_lr_seq)
376 goto done;
377 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
378 goto done;
379 ASSERT3U(max_lr_seq, <, lr->lrc_seq);
380 max_lr_seq = lr->lrc_seq;
381 lr_count++;
34dc7c2f 382 }
34dc7c2f 383 }
428870ff
BB
384done:
385 zilog->zl_parse_error = error;
386 zilog->zl_parse_blk_seq = max_blk_seq;
387 zilog->zl_parse_lr_seq = max_lr_seq;
388 zilog->zl_parse_blk_count = blk_count;
389 zilog->zl_parse_lr_count = lr_count;
390
391 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
392 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
393
394 zil_bp_tree_fini(zilog);
f1512ee6 395 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
34dc7c2f 396
428870ff 397 return (error);
34dc7c2f
BB
398}
399
428870ff 400static int
34dc7c2f
BB
401zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
402{
34dc7c2f
BB
403 /*
404 * Claim log block if not already committed and not already claimed.
428870ff 405 * If tx == NULL, just verify that the block is claimable.
34dc7c2f 406 */
b0bc7a84
MG
407 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
408 zil_bp_tree_add(zilog, bp) != 0)
428870ff
BB
409 return (0);
410
411 return (zio_wait(zio_claim(NULL, zilog->zl_spa,
412 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
413 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
34dc7c2f
BB
414}
415
428870ff 416static int
34dc7c2f
BB
417zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
418{
428870ff
BB
419 lr_write_t *lr = (lr_write_t *)lrc;
420 int error;
421
422 if (lrc->lrc_txtype != TX_WRITE)
423 return (0);
424
425 /*
426 * If the block is not readable, don't claim it. This can happen
427 * in normal operation when a log block is written to disk before
428 * some of the dmu_sync() blocks it points to. In this case, the
429 * transaction cannot have been committed to anyone (we would have
430 * waited for all writes to be stable first), so it is semantically
431 * correct to declare this the end of the log.
432 */
433 if (lr->lr_blkptr.blk_birth >= first_txg &&
434 (error = zil_read_log_data(zilog, lr, NULL)) != 0)
435 return (error);
436 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
34dc7c2f
BB
437}
438
439/* ARGSUSED */
428870ff 440static int
34dc7c2f
BB
441zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
442{
428870ff
BB
443 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
444
445 return (0);
34dc7c2f
BB
446}
447
428870ff 448static int
34dc7c2f
BB
449zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
450{
428870ff
BB
451 lr_write_t *lr = (lr_write_t *)lrc;
452 blkptr_t *bp = &lr->lr_blkptr;
453
34dc7c2f
BB
454 /*
455 * If we previously claimed it, we need to free it.
456 */
428870ff 457 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
b0bc7a84
MG
458 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
459 !BP_IS_HOLE(bp))
428870ff
BB
460 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
461
462 return (0);
463}
464
465static lwb_t *
920dd524 466zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg, boolean_t fastwrite)
428870ff
BB
467{
468 lwb_t *lwb;
469
79c76d5b 470 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
428870ff
BB
471 lwb->lwb_zilog = zilog;
472 lwb->lwb_blk = *bp;
920dd524 473 lwb->lwb_fastwrite = fastwrite;
428870ff
BB
474 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
475 lwb->lwb_max_txg = txg;
476 lwb->lwb_zio = NULL;
477 lwb->lwb_tx = NULL;
478 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
479 lwb->lwb_nused = sizeof (zil_chain_t);
480 lwb->lwb_sz = BP_GET_LSIZE(bp);
481 } else {
482 lwb->lwb_nused = 0;
483 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
34dc7c2f 484 }
428870ff
BB
485
486 mutex_enter(&zilog->zl_lock);
487 list_insert_tail(&zilog->zl_lwb_list, lwb);
488 mutex_exit(&zilog->zl_lock);
489
490 return (lwb);
34dc7c2f
BB
491}
492
29809a6c
MA
493/*
494 * Called when we create in-memory log transactions so that we know
495 * to cleanup the itxs at the end of spa_sync().
496 */
497void
498zilog_dirty(zilog_t *zilog, uint64_t txg)
499{
500 dsl_pool_t *dp = zilog->zl_dmu_pool;
501 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
502
0c66c32d 503 if (ds->ds_is_snapshot)
29809a6c
MA
504 panic("dirtying snapshot!");
505
13fe0198 506 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
29809a6c
MA
507 /* up the hold count until we can be written out */
508 dmu_buf_add_ref(ds->ds_dbuf, zilog);
509 }
510}
511
512boolean_t
513zilog_is_dirty(zilog_t *zilog)
514{
515 dsl_pool_t *dp = zilog->zl_dmu_pool;
516 int t;
517
518 for (t = 0; t < TXG_SIZE; t++) {
519 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
520 return (B_TRUE);
521 }
522 return (B_FALSE);
523}
524
34dc7c2f
BB
525/*
526 * Create an on-disk intent log.
527 */
428870ff 528static lwb_t *
34dc7c2f
BB
529zil_create(zilog_t *zilog)
530{
531 const zil_header_t *zh = zilog->zl_header;
428870ff 532 lwb_t *lwb = NULL;
34dc7c2f
BB
533 uint64_t txg = 0;
534 dmu_tx_t *tx = NULL;
535 blkptr_t blk;
536 int error = 0;
920dd524 537 boolean_t fastwrite = FALSE;
34dc7c2f
BB
538
539 /*
540 * Wait for any previous destroy to complete.
541 */
542 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
543
544 ASSERT(zh->zh_claim_txg == 0);
545 ASSERT(zh->zh_replay_seq == 0);
546
547 blk = zh->zh_log;
548
549 /*
428870ff
BB
550 * Allocate an initial log block if:
551 * - there isn't one already
552 * - the existing block is the wrong endianness
34dc7c2f 553 */
fb5f0bc8 554 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
34dc7c2f 555 tx = dmu_tx_create(zilog->zl_os);
428870ff 556 VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
34dc7c2f
BB
557 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
558 txg = dmu_tx_get_txg(tx);
559
fb5f0bc8 560 if (!BP_IS_HOLE(&blk)) {
428870ff 561 zio_free_zil(zilog->zl_spa, txg, &blk);
fb5f0bc8
BB
562 BP_ZERO(&blk);
563 }
564
920dd524 565 error = zio_alloc_zil(zilog->zl_spa, txg, &blk,
5d7a86d1 566 ZIL_MIN_BLKSZ, B_TRUE);
920dd524 567 fastwrite = TRUE;
34dc7c2f
BB
568
569 if (error == 0)
570 zil_init_log_chain(zilog, &blk);
571 }
572
573 /*
574 * Allocate a log write buffer (lwb) for the first log block.
575 */
428870ff 576 if (error == 0)
920dd524 577 lwb = zil_alloc_lwb(zilog, &blk, txg, fastwrite);
34dc7c2f
BB
578
579 /*
580 * If we just allocated the first log block, commit our transaction
581 * and wait for zil_sync() to stuff the block pointer into zh_log.
582 * (zh is part of the MOS, so we cannot modify it in open context.)
583 */
584 if (tx != NULL) {
585 dmu_tx_commit(tx);
586 txg_wait_synced(zilog->zl_dmu_pool, txg);
587 }
588
589 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
428870ff
BB
590
591 return (lwb);
34dc7c2f
BB
592}
593
594/*
595 * In one tx, free all log blocks and clear the log header.
596 * If keep_first is set, then we're replaying a log with no content.
597 * We want to keep the first block, however, so that the first
598 * synchronous transaction doesn't require a txg_wait_synced()
599 * in zil_create(). We don't need to txg_wait_synced() here either
600 * when keep_first is set, because both zil_create() and zil_destroy()
601 * will wait for any in-progress destroys to complete.
602 */
603void
604zil_destroy(zilog_t *zilog, boolean_t keep_first)
605{
606 const zil_header_t *zh = zilog->zl_header;
607 lwb_t *lwb;
608 dmu_tx_t *tx;
609 uint64_t txg;
610
611 /*
612 * Wait for any previous destroy to complete.
613 */
614 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
615
428870ff
BB
616 zilog->zl_old_header = *zh; /* debugging aid */
617
34dc7c2f
BB
618 if (BP_IS_HOLE(&zh->zh_log))
619 return;
620
621 tx = dmu_tx_create(zilog->zl_os);
428870ff 622 VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
34dc7c2f
BB
623 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
624 txg = dmu_tx_get_txg(tx);
625
626 mutex_enter(&zilog->zl_lock);
627
34dc7c2f
BB
628 ASSERT3U(zilog->zl_destroy_txg, <, txg);
629 zilog->zl_destroy_txg = txg;
630 zilog->zl_keep_first = keep_first;
631
632 if (!list_is_empty(&zilog->zl_lwb_list)) {
633 ASSERT(zh->zh_claim_txg == 0);
3e31d2b0 634 VERIFY(!keep_first);
34dc7c2f 635 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
920dd524
ED
636 ASSERT(lwb->lwb_zio == NULL);
637 if (lwb->lwb_fastwrite)
638 metaslab_fastwrite_unmark(zilog->zl_spa,
639 &lwb->lwb_blk);
34dc7c2f
BB
640 list_remove(&zilog->zl_lwb_list, lwb);
641 if (lwb->lwb_buf != NULL)
642 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
428870ff 643 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
34dc7c2f
BB
644 kmem_cache_free(zil_lwb_cache, lwb);
645 }
428870ff 646 } else if (!keep_first) {
29809a6c 647 zil_destroy_sync(zilog, tx);
34dc7c2f
BB
648 }
649 mutex_exit(&zilog->zl_lock);
650
651 dmu_tx_commit(tx);
652}
653
29809a6c
MA
654void
655zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
656{
657 ASSERT(list_is_empty(&zilog->zl_lwb_list));
658 (void) zil_parse(zilog, zil_free_log_block,
659 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
660}
661
34dc7c2f 662int
9c43027b 663zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
34dc7c2f
BB
664{
665 dmu_tx_t *tx = txarg;
666 uint64_t first_txg = dmu_tx_get_txg(tx);
667 zilog_t *zilog;
668 zil_header_t *zh;
669 objset_t *os;
670 int error;
671
9c43027b
AJ
672 error = dmu_objset_own_obj(dp, ds->ds_object,
673 DMU_OST_ANY, B_FALSE, FTAG, &os);
13fe0198 674 if (error != 0) {
6d9036f3
MA
675 /*
676 * EBUSY indicates that the objset is inconsistent, in which
677 * case it can not have a ZIL.
678 */
679 if (error != EBUSY) {
9c43027b
AJ
680 cmn_err(CE_WARN, "can't open objset for %llu, error %u",
681 (unsigned long long)ds->ds_object, error);
6d9036f3
MA
682 }
683
34dc7c2f
BB
684 return (0);
685 }
686
687 zilog = dmu_objset_zil(os);
688 zh = zil_header_in_syncing_context(zilog);
689
428870ff 690 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
9babb374 691 if (!BP_IS_HOLE(&zh->zh_log))
428870ff 692 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
9babb374
BB
693 BP_ZERO(&zh->zh_log);
694 dsl_dataset_dirty(dmu_objset_ds(os), tx);
13fe0198 695 dmu_objset_disown(os, FTAG);
428870ff 696 return (0);
9babb374
BB
697 }
698
34dc7c2f
BB
699 /*
700 * Claim all log blocks if we haven't already done so, and remember
701 * the highest claimed sequence number. This ensures that if we can
702 * read only part of the log now (e.g. due to a missing device),
703 * but we can read the entire log later, we will not try to replay
704 * or destroy beyond the last block we successfully claimed.
705 */
706 ASSERT3U(zh->zh_claim_txg, <=, first_txg);
707 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
428870ff 708 (void) zil_parse(zilog, zil_claim_log_block,
34dc7c2f 709 zil_claim_log_record, tx, first_txg);
428870ff
BB
710 zh->zh_claim_txg = first_txg;
711 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
712 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
713 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
714 zh->zh_flags |= ZIL_REPLAY_NEEDED;
715 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
34dc7c2f
BB
716 dsl_dataset_dirty(dmu_objset_ds(os), tx);
717 }
718
719 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
13fe0198 720 dmu_objset_disown(os, FTAG);
34dc7c2f
BB
721 return (0);
722}
723
b128c09f
BB
724/*
725 * Check the log by walking the log chain.
726 * Checksum errors are ok as they indicate the end of the chain.
727 * Any other error (no device or read failure) returns an error.
728 */
9c43027b 729/* ARGSUSED */
b128c09f 730int
9c43027b 731zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
b128c09f
BB
732{
733 zilog_t *zilog;
b128c09f 734 objset_t *os;
572e2857 735 blkptr_t *bp;
b128c09f
BB
736 int error;
737
428870ff
BB
738 ASSERT(tx == NULL);
739
9c43027b 740 error = dmu_objset_from_ds(ds, &os);
13fe0198 741 if (error != 0) {
9c43027b
AJ
742 cmn_err(CE_WARN, "can't open objset %llu, error %d",
743 (unsigned long long)ds->ds_object, error);
b128c09f
BB
744 return (0);
745 }
746
747 zilog = dmu_objset_zil(os);
572e2857
BB
748 bp = (blkptr_t *)&zilog->zl_header->zh_log;
749
750 /*
751 * Check the first block and determine if it's on a log device
752 * which may have been removed or faulted prior to loading this
753 * pool. If so, there's no point in checking the rest of the log
754 * as its content should have already been synced to the pool.
755 */
756 if (!BP_IS_HOLE(bp)) {
757 vdev_t *vd;
758 boolean_t valid = B_TRUE;
759
760 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
761 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
762 if (vd->vdev_islog && vdev_is_dead(vd))
763 valid = vdev_log_state_valid(vd);
764 spa_config_exit(os->os_spa, SCL_STATE, FTAG);
765
9c43027b 766 if (!valid)
572e2857 767 return (0);
572e2857 768 }
b128c09f 769
428870ff
BB
770 /*
771 * Because tx == NULL, zil_claim_log_block() will not actually claim
772 * any blocks, but just determine whether it is possible to do so.
773 * In addition to checking the log chain, zil_claim_log_block()
774 * will invoke zio_claim() with a done func of spa_claim_notify(),
775 * which will update spa_max_claim_txg. See spa_load() for details.
776 */
777 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
778 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
779
428870ff 780 return ((error == ECKSUM || error == ENOENT) ? 0 : error);
b128c09f
BB
781}
782
34dc7c2f
BB
783static int
784zil_vdev_compare(const void *x1, const void *x2)
785{
572e2857
BB
786 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
787 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
34dc7c2f
BB
788
789 if (v1 < v2)
790 return (-1);
791 if (v1 > v2)
792 return (1);
793
794 return (0);
795}
796
797void
428870ff 798zil_add_block(zilog_t *zilog, const blkptr_t *bp)
34dc7c2f
BB
799{
800 avl_tree_t *t = &zilog->zl_vdev_tree;
801 avl_index_t where;
802 zil_vdev_node_t *zv, zvsearch;
803 int ndvas = BP_GET_NDVAS(bp);
804 int i;
805
806 if (zfs_nocacheflush)
807 return;
808
809 ASSERT(zilog->zl_writer);
810
811 /*
812 * Even though we're zl_writer, we still need a lock because the
813 * zl_get_data() callbacks may have dmu_sync() done callbacks
814 * that will run concurrently.
815 */
816 mutex_enter(&zilog->zl_vdev_lock);
817 for (i = 0; i < ndvas; i++) {
818 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
819 if (avl_find(t, &zvsearch, &where) == NULL) {
79c76d5b 820 zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
34dc7c2f
BB
821 zv->zv_vdev = zvsearch.zv_vdev;
822 avl_insert(t, zv, where);
823 }
824 }
825 mutex_exit(&zilog->zl_vdev_lock);
826}
827
572e2857 828static void
34dc7c2f
BB
829zil_flush_vdevs(zilog_t *zilog)
830{
831 spa_t *spa = zilog->zl_spa;
832 avl_tree_t *t = &zilog->zl_vdev_tree;
833 void *cookie = NULL;
834 zil_vdev_node_t *zv;
835 zio_t *zio;
836
837 ASSERT(zilog->zl_writer);
838
839 /*
840 * We don't need zl_vdev_lock here because we're the zl_writer,
841 * and all zl_get_data() callbacks are done.
842 */
843 if (avl_numnodes(t) == 0)
844 return;
845
b128c09f 846 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
34dc7c2f 847
b128c09f 848 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
34dc7c2f
BB
849
850 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
851 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
852 if (vd != NULL)
853 zio_flush(zio, vd);
854 kmem_free(zv, sizeof (*zv));
855 }
856
857 /*
858 * Wait for all the flushes to complete. Not all devices actually
859 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
860 */
861 (void) zio_wait(zio);
862
b128c09f 863 spa_config_exit(spa, SCL_STATE, FTAG);
34dc7c2f
BB
864}
865
866/*
867 * Function called when a log block write completes
868 */
869static void
870zil_lwb_write_done(zio_t *zio)
871{
872 lwb_t *lwb = zio->io_private;
873 zilog_t *zilog = lwb->lwb_zilog;
428870ff 874 dmu_tx_t *tx = lwb->lwb_tx;
34dc7c2f 875
b128c09f 876 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
b128c09f
BB
877 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
878 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
879 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
880 ASSERT(!BP_IS_GANG(zio->io_bp));
881 ASSERT(!BP_IS_HOLE(zio->io_bp));
9b67f605 882 ASSERT(BP_GET_FILL(zio->io_bp) == 0);
b128c09f 883
34dc7c2f 884 /*
9babb374
BB
885 * Ensure the lwb buffer pointer is cleared before releasing
886 * the txg. If we have had an allocation failure and
887 * the txg is waiting to sync then we want zil_sync()
888 * to remove the lwb so that it's not picked up as the next new
889 * one in zil_commit_writer(). zil_sync() will only remove
890 * the lwb if lwb_buf is null.
34dc7c2f 891 */
34dc7c2f
BB
892 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
893 mutex_enter(&zilog->zl_lock);
920dd524
ED
894 lwb->lwb_zio = NULL;
895 lwb->lwb_fastwrite = FALSE;
34dc7c2f 896 lwb->lwb_buf = NULL;
428870ff
BB
897 lwb->lwb_tx = NULL;
898 mutex_exit(&zilog->zl_lock);
9babb374
BB
899
900 /*
901 * Now that we've written this log block, we have a stable pointer
902 * to the next block in the chain, so it's OK to let the txg in
428870ff 903 * which we allocated the next block sync.
9babb374 904 */
428870ff 905 dmu_tx_commit(tx);
34dc7c2f
BB
906}
907
908/*
909 * Initialize the io for a log block.
34dc7c2f
BB
910 */
911static void
912zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
913{
5dbd68a3 914 zbookmark_phys_t zb;
34dc7c2f 915
428870ff
BB
916 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
917 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
918 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
34dc7c2f
BB
919
920 if (zilog->zl_root_zio == NULL) {
921 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
922 ZIO_FLAG_CANFAIL);
923 }
920dd524
ED
924
925 /* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
926 mutex_enter(&zilog->zl_lock);
34dc7c2f 927 if (lwb->lwb_zio == NULL) {
920dd524
ED
928 if (!lwb->lwb_fastwrite) {
929 metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
930 lwb->lwb_fastwrite = 1;
931 }
34dc7c2f 932 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
428870ff 933 0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
e8b96c60 934 zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE,
920dd524
ED
935 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
936 ZIO_FLAG_FASTWRITE, &zb);
34dc7c2f 937 }
920dd524 938 mutex_exit(&zilog->zl_lock);
34dc7c2f
BB
939}
940
428870ff
BB
941/*
942 * Define a limited set of intent log block sizes.
d3cc8b15 943 *
428870ff
BB
944 * These must be a multiple of 4KB. Note only the amount used (again
945 * aligned to 4KB) actually gets written. However, we can't always just
f1512ee6 946 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
428870ff
BB
947 */
948uint64_t zil_block_buckets[] = {
949 4096, /* non TX_WRITE */
950 8192+4096, /* data base */
951 32*1024 + 4096, /* NFS writes */
952 UINT64_MAX
953};
954
955/*
5d7a86d1
ED
956 * Use the slog as long as the current commit size is less than the
957 * limit or the total list size is less than 2X the limit. Limit
958 * checking is disabled by setting zil_slog_limit to UINT64_MAX.
428870ff 959 */
ee191e80 960unsigned long zil_slog_limit = 1024 * 1024;
5d7a86d1
ED
961#define USE_SLOG(zilog) (((zilog)->zl_cur_used < zil_slog_limit) || \
962 ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1)))
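/*
 * For example, with the default zil_slog_limit of 1 MiB, commits keep
 * going to the separate log device while zl_cur_used is under 1 MiB or
 * the in-memory itx list is under 2 MiB; beyond both thresholds,
 * zio_alloc_zil() is told not to use the slog and the block is
 * allocated from the main pool instead.
 */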
428870ff 963
34dc7c2f
BB
964/*
965 * Start a log block write and advance to the next log block.
966 * Calls are serialized.
967 */
968static lwb_t *
969zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
970{
428870ff
BB
971 lwb_t *nlwb = NULL;
972 zil_chain_t *zilc;
34dc7c2f 973 spa_t *spa = zilog->zl_spa;
428870ff
BB
974 blkptr_t *bp;
975 dmu_tx_t *tx;
34dc7c2f 976 uint64_t txg;
428870ff
BB
977 uint64_t zil_blksz, wsz;
978 int i, error;
b6ad9671 979 boolean_t use_slog;
428870ff
BB
980
981 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
982 zilc = (zil_chain_t *)lwb->lwb_buf;
983 bp = &zilc->zc_next_blk;
984 } else {
985 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
986 bp = &zilc->zc_next_blk;
987 }
34dc7c2f 988
428870ff 989 ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
34dc7c2f
BB
990
991 /*
992 * Allocate the next block and save its address in this block
993 * before writing it in order to establish the log chain.
994 * Note that if the allocation of nlwb synced before we wrote
995 * the block that points at it (lwb), we'd leak it if we crashed.
428870ff
BB
996 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
997 * We dirty the dataset to ensure that zil_sync() will be called
998 * to clean up in the event of allocation failure or I/O failure.
34dc7c2f 999 */
428870ff
BB
1000 tx = dmu_tx_create(zilog->zl_os);
1001 VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
1002 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1003 txg = dmu_tx_get_txg(tx);
1004
1005 lwb->lwb_tx = tx;
34dc7c2f
BB
1006
1007 /*
428870ff
BB
1008 * Log blocks are pre-allocated. Here we select the size of the next
1009 * block, based on size used in the last block.
1010 * - first find the smallest bucket that will fit the block from a
1011 * limited set of block sizes. This is because it's faster to write
1012 * blocks allocated from the same metaslab as they are adjacent or
1013 * close.
1014 * - next find the maximum from the new suggested size and an array of
1015 * previous sizes. This lessens a picket fence effect of wrongly
1016 * guessing the size if we have a stream of say 2k, 64k, 2k, 64k
1017 * requests.
1018 *
1019 * Note we only write what is used, but we can't just allocate
1020 * the maximum block size because we can exhaust the available
1021 * pool log space.
34dc7c2f 1022 */
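	/*
	 * Worked example: a commit that has accumulated ~20K of records
	 * yields a zil_blksz of about 20K + sizeof (zil_chain_t), so the
	 * smallest bucket that fits is 32K + 4K; if any of the ZIL_PREV_BLKS
	 * previously chosen sizes was larger (say 128K), that size wins.
	 */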
428870ff
BB
1023 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1024 for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
1025 continue;
1026 zil_blksz = zil_block_buckets[i];
1027 if (zil_blksz == UINT64_MAX)
f1512ee6 1028 zil_blksz = SPA_OLD_MAXBLOCKSIZE;
428870ff
BB
1029 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1030 for (i = 0; i < ZIL_PREV_BLKS; i++)
1031 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1032 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
34dc7c2f
BB
1033
1034 BP_ZERO(bp);
b6ad9671 1035 use_slog = USE_SLOG(zilog);
13fe0198
MA
1036 error = zio_alloc_zil(spa, txg, bp, zil_blksz,
1037 USE_SLOG(zilog));
d1d7e268 1038 if (use_slog) {
b6ad9671
ED
1039 ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
1040 ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
d1d7e268 1041 } else {
b6ad9671
ED
1042 ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
1043 ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
1044 }
13fe0198 1045 if (error == 0) {
428870ff
BB
1046 ASSERT3U(bp->blk_birth, ==, txg);
1047 bp->blk_cksum = lwb->lwb_blk.blk_cksum;
1048 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
34dc7c2f
BB
1049
1050 /*
428870ff 1051 * Allocate a new log write buffer (lwb).
34dc7c2f 1052 */
920dd524 1053 nlwb = zil_alloc_lwb(zilog, bp, txg, TRUE);
34dc7c2f 1054
428870ff
BB
1055 /* Record the block for later vdev flushing */
1056 zil_add_block(zilog, &lwb->lwb_blk);
34dc7c2f
BB
1057 }
1058
428870ff
BB
1059 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1060 /* For Slim ZIL only write what is used. */
1061 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
1062 ASSERT3U(wsz, <=, lwb->lwb_sz);
1063 zio_shrink(lwb->lwb_zio, wsz);
34dc7c2f 1064
428870ff
BB
1065 } else {
1066 wsz = lwb->lwb_sz;
1067 }
34dc7c2f 1068
428870ff
BB
1069 zilc->zc_pad = 0;
1070 zilc->zc_nused = lwb->lwb_nused;
1071 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
34dc7c2f
BB
1072
1073 /*
428870ff 1074 * clear unused data for security
34dc7c2f 1075 */
428870ff 1076 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
34dc7c2f 1077
428870ff 1078 zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */
34dc7c2f
BB
1079
1080 /*
428870ff
BB
1081 * If there was an allocation failure then nlwb will be null which
1082 * forces a txg_wait_synced().
34dc7c2f 1083 */
34dc7c2f
BB
1084 return (nlwb);
1085}
1086
1087static lwb_t *
1088zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1089{
1090 lr_t *lrc = &itx->itx_lr; /* common log record */
428870ff
BB
1091 lr_write_t *lrw = (lr_write_t *)lrc;
1092 char *lr_buf;
34dc7c2f
BB
1093 uint64_t txg = lrc->lrc_txg;
1094 uint64_t reclen = lrc->lrc_reclen;
428870ff 1095 uint64_t dlen = 0;
34dc7c2f
BB
1096
1097 if (lwb == NULL)
1098 return (NULL);
428870ff 1099
34dc7c2f 1100 ASSERT(lwb->lwb_buf != NULL);
29809a6c
MA
1101 ASSERT(zilog_is_dirty(zilog) ||
1102 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
34dc7c2f
BB
1103
1104 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
1105 dlen = P2ROUNDUP_TYPED(
428870ff 1106 lrw->lr_length, sizeof (uint64_t), uint64_t);
34dc7c2f
BB
1107
1108 zilog->zl_cur_used += (reclen + dlen);
1109
1110 zil_lwb_write_init(zilog, lwb);
1111
1112 /*
1113 * If this record won't fit in the current log block, start a new one.
1114 */
428870ff 1115 if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
34dc7c2f
BB
1116 lwb = zil_lwb_write_start(zilog, lwb);
1117 if (lwb == NULL)
1118 return (NULL);
1119 zil_lwb_write_init(zilog, lwb);
428870ff
BB
1120 ASSERT(LWB_EMPTY(lwb));
1121 if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
34dc7c2f
BB
1122 txg_wait_synced(zilog->zl_dmu_pool, txg);
1123 return (lwb);
1124 }
1125 }
1126
428870ff
BB
1127 lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1128 bcopy(lrc, lr_buf, reclen);
1129 lrc = (lr_t *)lr_buf;
1130 lrw = (lr_write_t *)lrc;
34dc7c2f 1131
b6ad9671
ED
1132 ZIL_STAT_BUMP(zil_itx_count);
1133
34dc7c2f
BB
1134 /*
1135 * If it's a write, fetch the data or get its blkptr as appropriate.
1136 */
1137 if (lrc->lrc_txtype == TX_WRITE) {
1138 if (txg > spa_freeze_txg(zilog->zl_spa))
1139 txg_wait_synced(zilog->zl_dmu_pool, txg);
b6ad9671
ED
1140 if (itx->itx_wr_state == WR_COPIED) {
1141 ZIL_STAT_BUMP(zil_itx_copied_count);
1142 ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
1143 } else {
34dc7c2f
BB
1144 char *dbuf;
1145 int error;
1146
34dc7c2f
BB
1147 if (dlen) {
1148 ASSERT(itx->itx_wr_state == WR_NEED_COPY);
428870ff
BB
1149 dbuf = lr_buf + reclen;
1150 lrw->lr_common.lrc_reclen += dlen;
b6ad9671 1151 ZIL_STAT_BUMP(zil_itx_needcopy_count);
d1d7e268
MK
1152 ZIL_STAT_INCR(zil_itx_needcopy_bytes,
1153 lrw->lr_length);
34dc7c2f
BB
1154 } else {
1155 ASSERT(itx->itx_wr_state == WR_INDIRECT);
1156 dbuf = NULL;
b6ad9671 1157 ZIL_STAT_BUMP(zil_itx_indirect_count);
d1d7e268
MK
1158 ZIL_STAT_INCR(zil_itx_indirect_bytes,
1159 lrw->lr_length);
34dc7c2f
BB
1160 }
1161 error = zilog->zl_get_data(
428870ff 1162 itx->itx_private, lrw, dbuf, lwb->lwb_zio);
45d1cae3
BB
1163 if (error == EIO) {
1164 txg_wait_synced(zilog->zl_dmu_pool, txg);
1165 return (lwb);
1166 }
13fe0198 1167 if (error != 0) {
34dc7c2f
BB
1168 ASSERT(error == ENOENT || error == EEXIST ||
1169 error == EALREADY);
1170 return (lwb);
1171 }
1172 }
1173 }
1174
428870ff
BB
1175 /*
1176 * We're actually making an entry, so update lrc_seq to be the
1177 * log record sequence number. Note that this is generally not
1178 * equal to the itx sequence number because not all transactions
1179 * are synchronous, and sometimes spa_sync() gets there first.
1180 */
1181 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
34dc7c2f
BB
1182 lwb->lwb_nused += reclen + dlen;
1183 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
428870ff 1184 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
c99c9001 1185 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
34dc7c2f
BB
1186
1187 return (lwb);
1188}
1189
1190itx_t *
1191zil_itx_create(uint64_t txtype, size_t lrsize)
1192{
1193 itx_t *itx;
1194
1195 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1196
19ea3d25 1197 itx = zio_data_buf_alloc(offsetof(itx_t, itx_lr) + lrsize);
34dc7c2f
BB
1198 itx->itx_lr.lrc_txtype = txtype;
1199 itx->itx_lr.lrc_reclen = lrsize;
1200 itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
1201 itx->itx_lr.lrc_seq = 0; /* defensive */
572e2857 1202 itx->itx_sync = B_TRUE; /* default is synchronous */
119a394a
ED
1203 itx->itx_callback = NULL;
1204 itx->itx_callback_data = NULL;
34dc7c2f
BB
1205
1206 return (itx);
1207}
1208
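/*
 * Typical producer flow (a rough sketch; see zfs_log.c for the real
 * callers): allocate an itx sized for the record, fill in the record
 * body, then hand the itx to the ZIL, e.g.
 *
 *	itx = zil_itx_create(TX_SETATTR, sizeof (lr_setattr_t));
 *	lr = (lr_setattr_t *)&itx->itx_lr;
 *	lr->lr_foid = zp->z_id;
 *	... fill in the remaining lr_setattr_t fields ...
 *	zil_itx_assign(zilog, itx, tx);
 *
 * The itx is later freed with zil_itx_destroy() once it has been
 * committed or cleaned.
 */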
428870ff
BB
1209void
1210zil_itx_destroy(itx_t *itx)
1211{
19ea3d25 1212 zio_data_buf_free(itx, offsetof(itx_t, itx_lr)+itx->itx_lr.lrc_reclen);
428870ff
BB
1213}
1214
572e2857
BB
1215/*
1216 * Free up the sync and async itxs. The itxs_t has already been detached
1217 * so no locks are needed.
1218 */
1219static void
1220zil_itxg_clean(itxs_t *itxs)
34dc7c2f 1221{
572e2857
BB
1222 itx_t *itx;
1223 list_t *list;
1224 avl_tree_t *t;
1225 void *cookie;
1226 itx_async_node_t *ian;
1227
1228 list = &itxs->i_sync_list;
1229 while ((itx = list_head(list)) != NULL) {
119a394a
ED
1230 if (itx->itx_callback != NULL)
1231 itx->itx_callback(itx->itx_callback_data);
572e2857 1232 list_remove(list, itx);
19ea3d25 1233 zil_itx_destroy(itx);
572e2857 1234 }
34dc7c2f 1235
572e2857
BB
1236 cookie = NULL;
1237 t = &itxs->i_async_tree;
1238 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1239 list = &ian->ia_list;
1240 while ((itx = list_head(list)) != NULL) {
119a394a
ED
1241 if (itx->itx_callback != NULL)
1242 itx->itx_callback(itx->itx_callback_data);
572e2857 1243 list_remove(list, itx);
19ea3d25 1244 zil_itx_destroy(itx);
572e2857
BB
1245 }
1246 list_destroy(list);
1247 kmem_free(ian, sizeof (itx_async_node_t));
1248 }
1249 avl_destroy(t);
34dc7c2f 1250
572e2857
BB
1251 kmem_free(itxs, sizeof (itxs_t));
1252}
34dc7c2f 1253
572e2857
BB
1254static int
1255zil_aitx_compare(const void *x1, const void *x2)
1256{
1257 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
1258 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1259
1260 if (o1 < o2)
1261 return (-1);
1262 if (o1 > o2)
1263 return (1);
1264
1265 return (0);
34dc7c2f
BB
1266}
1267
1268/*
572e2857 1269 * Remove all async itx with the given oid.
34dc7c2f
BB
1270 */
1271static void
572e2857 1272zil_remove_async(zilog_t *zilog, uint64_t oid)
34dc7c2f 1273{
572e2857
BB
1274 uint64_t otxg, txg;
1275 itx_async_node_t *ian;
1276 avl_tree_t *t;
1277 avl_index_t where;
34dc7c2f
BB
1278 list_t clean_list;
1279 itx_t *itx;
1280
572e2857 1281 ASSERT(oid != 0);
34dc7c2f
BB
1282 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1283
572e2857
BB
1284 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1285 otxg = ZILTEST_TXG;
1286 else
1287 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
34dc7c2f 1288
572e2857
BB
1289 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1290 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1291
1292 mutex_enter(&itxg->itxg_lock);
1293 if (itxg->itxg_txg != txg) {
1294 mutex_exit(&itxg->itxg_lock);
1295 continue;
1296 }
34dc7c2f 1297
572e2857
BB
1298 /*
1299 * Locate the object node and append its list.
1300 */
1301 t = &itxg->itxg_itxs->i_async_tree;
1302 ian = avl_find(t, &oid, &where);
1303 if (ian != NULL)
1304 list_move_tail(&clean_list, &ian->ia_list);
1305 mutex_exit(&itxg->itxg_lock);
1306 }
34dc7c2f 1307 while ((itx = list_head(&clean_list)) != NULL) {
119a394a
ED
1308 if (itx->itx_callback != NULL)
1309 itx->itx_callback(itx->itx_callback_data);
34dc7c2f 1310 list_remove(&clean_list, itx);
19ea3d25 1311 zil_itx_destroy(itx);
34dc7c2f
BB
1312 }
1313 list_destroy(&clean_list);
1314}
1315
572e2857
BB
1316void
1317zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1318{
1319 uint64_t txg;
1320 itxg_t *itxg;
1321 itxs_t *itxs, *clean = NULL;
1322
1323 /*
1324 * Object ids can be re-instantiated in the next txg so
1325 * remove any async transactions to avoid future leaks.
1326 * This can happen if a fsync occurs on the re-instantiated
1327 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1328 * the new file data and flushes a write record for the old object.
1329 */
1330 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1331 zil_remove_async(zilog, itx->itx_oid);
1332
1333 /*
1334 * Ensure the data of a renamed file is committed before the rename.
1335 */
1336 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1337 zil_async_to_sync(zilog, itx->itx_oid);
1338
29809a6c 1339 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
572e2857
BB
1340 txg = ZILTEST_TXG;
1341 else
1342 txg = dmu_tx_get_txg(tx);
1343
1344 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1345 mutex_enter(&itxg->itxg_lock);
1346 itxs = itxg->itxg_itxs;
1347 if (itxg->itxg_txg != txg) {
1348 if (itxs != NULL) {
1349 /*
1350 * The zil_clean callback hasn't got around to cleaning
1351 * this itxg. Save the itxs for release below.
1352 * This should be rare.
1353 */
1354 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1355 itxg->itxg_sod = 0;
1356 clean = itxg->itxg_itxs;
1357 }
1358 ASSERT(itxg->itxg_sod == 0);
1359 itxg->itxg_txg = txg;
d1d7e268 1360 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
79c76d5b 1361 KM_SLEEP);
572e2857
BB
1362
1363 list_create(&itxs->i_sync_list, sizeof (itx_t),
1364 offsetof(itx_t, itx_node));
1365 avl_create(&itxs->i_async_tree, zil_aitx_compare,
1366 sizeof (itx_async_node_t),
1367 offsetof(itx_async_node_t, ia_node));
1368 }
1369 if (itx->itx_sync) {
1370 list_insert_tail(&itxs->i_sync_list, itx);
1371 atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
1372 itxg->itxg_sod += itx->itx_sod;
1373 } else {
1374 avl_tree_t *t = &itxs->i_async_tree;
1375 uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1376 itx_async_node_t *ian;
1377 avl_index_t where;
1378
1379 ian = avl_find(t, &foid, &where);
1380 if (ian == NULL) {
d1d7e268 1381 ian = kmem_alloc(sizeof (itx_async_node_t),
79c76d5b 1382 KM_SLEEP);
572e2857
BB
1383 list_create(&ian->ia_list, sizeof (itx_t),
1384 offsetof(itx_t, itx_node));
1385 ian->ia_foid = foid;
1386 avl_insert(t, ian, where);
1387 }
1388 list_insert_tail(&ian->ia_list, itx);
1389 }
1390
1391 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
29809a6c 1392 zilog_dirty(zilog, txg);
572e2857
BB
1393 mutex_exit(&itxg->itxg_lock);
1394
1395 /* Release the old itxs now we've dropped the lock */
1396 if (clean != NULL)
1397 zil_itxg_clean(clean);
1398}
1399
34dc7c2f
BB
1400/*
1401 * If there are any in-memory intent log transactions which have now been
29809a6c
MA
1402 * synced then start up a taskq to free them. We should only do this after we
1403 * have written out the uberblocks (i.e. txg has been committed) so that
1404 * we don't inadvertently clean out in-memory log records that would be required
1405 * by zil_commit().
34dc7c2f
BB
1406 */
1407void
572e2857 1408zil_clean(zilog_t *zilog, uint64_t synced_txg)
34dc7c2f 1409{
572e2857
BB
1410 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1411 itxs_t *clean_me;
34dc7c2f 1412
572e2857
BB
1413 mutex_enter(&itxg->itxg_lock);
1414 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1415 mutex_exit(&itxg->itxg_lock);
1416 return;
1417 }
1418 ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1419 ASSERT(itxg->itxg_txg != 0);
1420 ASSERT(zilog->zl_clean_taskq != NULL);
1421 atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
1422 itxg->itxg_sod = 0;
1423 clean_me = itxg->itxg_itxs;
1424 itxg->itxg_itxs = NULL;
1425 itxg->itxg_txg = 0;
1426 mutex_exit(&itxg->itxg_lock);
1427 /*
1428 * Preferably start a task queue to free up the old itxs but
1429 * if taskq_dispatch can't allocate resources to do that then
1430 * free them in-line. This should be rare. Note, using TQ_SLEEP
1431 * created a bad performance problem.
1432 */
1433 if (taskq_dispatch(zilog->zl_clean_taskq,
b8864a23 1434 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
572e2857
BB
1435 zil_itxg_clean(clean_me);
1436}
1437
1438/*
1439 * Get the list of itxs to commit into zl_itx_commit_list.
1440 */
1441static void
1442zil_get_commit_list(zilog_t *zilog)
1443{
1444 uint64_t otxg, txg;
1445 list_t *commit_list = &zilog->zl_itx_commit_list;
1446 uint64_t push_sod = 0;
1447
1448 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1449 otxg = ZILTEST_TXG;
1450 else
1451 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1452
1453 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1454 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1455
1456 mutex_enter(&itxg->itxg_lock);
1457 if (itxg->itxg_txg != txg) {
1458 mutex_exit(&itxg->itxg_lock);
1459 continue;
1460 }
1461
1462 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1463 push_sod += itxg->itxg_sod;
1464 itxg->itxg_sod = 0;
1465
1466 mutex_exit(&itxg->itxg_lock);
1467 }
1468 atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
1469}
1470
1471/*
1472 * Move the async itxs for a specified object to commit into sync lists.
1473 */
1474static void
1475zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1476{
1477 uint64_t otxg, txg;
1478 itx_async_node_t *ian;
1479 avl_tree_t *t;
1480 avl_index_t where;
1481
1482 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1483 otxg = ZILTEST_TXG;
1484 else
1485 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1486
1487 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1488 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1489
1490 mutex_enter(&itxg->itxg_lock);
1491 if (itxg->itxg_txg != txg) {
1492 mutex_exit(&itxg->itxg_lock);
1493 continue;
1494 }
1495
1496 /*
1497 * If a foid is specified then find that node and append its
1498 * list. Otherwise walk the tree appending all the lists
1499 * to the sync list. We add to the end rather than the
1500 * beginning to ensure the create has happened.
1501 */
1502 t = &itxg->itxg_itxs->i_async_tree;
1503 if (foid != 0) {
1504 ian = avl_find(t, &foid, &where);
1505 if (ian != NULL) {
1506 list_move_tail(&itxg->itxg_itxs->i_sync_list,
1507 &ian->ia_list);
1508 }
1509 } else {
1510 void *cookie = NULL;
1511
1512 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1513 list_move_tail(&itxg->itxg_itxs->i_sync_list,
1514 &ian->ia_list);
1515 list_destroy(&ian->ia_list);
1516 kmem_free(ian, sizeof (itx_async_node_t));
1517 }
1518 }
1519 mutex_exit(&itxg->itxg_lock);
34dc7c2f 1520 }
34dc7c2f
BB
1521}
1522
b128c09f 1523static void
572e2857 1524zil_commit_writer(zilog_t *zilog)
34dc7c2f
BB
1525{
1526 uint64_t txg;
572e2857 1527 itx_t *itx;
34dc7c2f 1528 lwb_t *lwb;
572e2857 1529 spa_t *spa = zilog->zl_spa;
428870ff 1530 int error = 0;
34dc7c2f 1531
b128c09f 1532 ASSERT(zilog->zl_root_zio == NULL);
572e2857
BB
1533
1534 mutex_exit(&zilog->zl_lock);
1535
1536 zil_get_commit_list(zilog);
1537
1538 /*
1539 * Return if there's nothing to commit before we dirty the fs by
1540 * calling zil_create().
1541 */
1542 if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1543 mutex_enter(&zilog->zl_lock);
1544 return;
1545 }
34dc7c2f
BB
1546
1547 if (zilog->zl_suspend) {
1548 lwb = NULL;
1549 } else {
1550 lwb = list_tail(&zilog->zl_lwb_list);
572e2857 1551 if (lwb == NULL)
428870ff 1552 lwb = zil_create(zilog);
34dc7c2f
BB
1553 }
1554
34dc7c2f 1555 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
119a394a 1556 for (itx = list_head(&zilog->zl_itx_commit_list); itx != NULL;
d1d7e268 1557 itx = list_next(&zilog->zl_itx_commit_list, itx)) {
34dc7c2f
BB
1558 txg = itx->itx_lr.lrc_txg;
1559 ASSERT(txg);
1560
572e2857 1561 if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
34dc7c2f 1562 lwb = zil_lwb_commit(zilog, itx, lwb);
34dc7c2f
BB
1563 }
1564 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
34dc7c2f
BB
1565
1566 /* write the last block out */
1567 if (lwb != NULL && lwb->lwb_zio != NULL)
1568 lwb = zil_lwb_write_start(zilog, lwb);
1569
34dc7c2f
BB
1570 zilog->zl_cur_used = 0;
1571
1572 /*
1573 * Wait if necessary for the log blocks to be on stable storage.
1574 */
1575 if (zilog->zl_root_zio) {
428870ff 1576 error = zio_wait(zilog->zl_root_zio);
b128c09f 1577 zilog->zl_root_zio = NULL;
34dc7c2f
BB
1578 zil_flush_vdevs(zilog);
1579 }
1580
428870ff 1581 if (error || lwb == NULL)
34dc7c2f 1582 txg_wait_synced(zilog->zl_dmu_pool, 0);
34dc7c2f 1583
119a394a
ED
1584 while ((itx = list_head(&zilog->zl_itx_commit_list))) {
1585 txg = itx->itx_lr.lrc_txg;
1586 ASSERT(txg);
1587
1588 if (itx->itx_callback != NULL)
1589 itx->itx_callback(itx->itx_callback_data);
1590 list_remove(&zilog->zl_itx_commit_list, itx);
19ea3d25 1591 zil_itx_destroy(itx);
119a394a
ED
1592 }
1593
34dc7c2f 1594 mutex_enter(&zilog->zl_lock);
428870ff
BB
1595
1596 /*
1597 * Remember the highest committed log sequence number for ztest.
1598 * We only update this value when all the log writes succeeded,
1599 * because ztest wants to ASSERT that it got the whole log chain.
1600 */
1601 if (error == 0 && lwb != NULL)
1602 zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
34dc7c2f
BB
1603}
1604
1605/*
572e2857 1606 * Commit zfs transactions to stable storage.
34dc7c2f 1607 * If foid is 0 push out all transactions, otherwise push only those
572e2857
BB
1608 * for that object or might reference that object.
1609 *
1610 * itxs are committed in batches. In a heavily stressed zil there will be
1611 * a commit writer thread that is writing out a bunch of itxs to the log
1612 * for a set of committing threads (cthreads) in the same batch as the writer.
1613 * Those cthreads are all waiting on the same cv for that batch.
1614 *
1615 * There will also be a different and growing batch of threads that are
1616 * waiting to commit (qthreads). When the committing batch completes
1617 * a transition occurs such that the cthreads exit and the qthreads become
1618 * cthreads. One of the new cthreads becomes the writer thread for the
1619 * batch. Any new threads arriving become new qthreads.
1620 *
1621 * Only 2 condition variables are needed and there's no transition
1622 * between the two cvs needed. They just flip-flop between qthreads
1623 * and cthreads.
1624 *
1625 * Using this scheme we can efficiently wake up only those threads
1626 * that have been committed.
34dc7c2f
BB
1627 */
1628void
572e2857 1629zil_commit(zilog_t *zilog, uint64_t foid)
34dc7c2f 1630{
572e2857 1631 uint64_t mybatch;
34dc7c2f 1632
572e2857
BB
1633 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1634 return;
34dc7c2f 1635
b6ad9671
ED
1636 ZIL_STAT_BUMP(zil_commit_count);
1637
572e2857
BB
1638 /* move the async itxs for the foid to the sync queues */
1639 zil_async_to_sync(zilog, foid);
34dc7c2f 1640
572e2857
BB
1641 mutex_enter(&zilog->zl_lock);
1642 mybatch = zilog->zl_next_batch;
34dc7c2f 1643 while (zilog->zl_writer) {
572e2857
BB
1644 cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1645 if (mybatch <= zilog->zl_com_batch) {
34dc7c2f
BB
1646 mutex_exit(&zilog->zl_lock);
1647 return;
1648 }
1649 }
428870ff 1650
572e2857
BB
1651 zilog->zl_next_batch++;
1652 zilog->zl_writer = B_TRUE;
b6ad9671 1653 ZIL_STAT_BUMP(zil_commit_writer_count);
572e2857
BB
1654 zil_commit_writer(zilog);
1655 zilog->zl_com_batch = mybatch;
1656 zilog->zl_writer = B_FALSE;
428870ff 1657
572e2857
BB
1658 /* wake up one thread to become the next writer */
1659 cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
428870ff 1660
572e2857
BB
1661 /* wake up all threads waiting for this batch to be committed */
1662 cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
8c0712fd
BB
1663
1664 mutex_exit(&zilog->zl_lock);
428870ff
BB
1665}
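/*
 * Illustrative note on callers: synchronous paths such as zfs_fsync()
 * arrive here with the object id of the file being synced, e.g.
 * zil_commit(zilog, zp->z_id), while callers that need to flush every
 * outstanding itx for the dataset pass a foid of 0.
 */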
1666
34dc7c2f
BB
1667/*
1668 * Called in syncing context to free committed log blocks and update log header.
1669 */
1670void
1671zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1672{
1673 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1674 uint64_t txg = dmu_tx_get_txg(tx);
1675 spa_t *spa = zilog->zl_spa;
428870ff 1676 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
34dc7c2f
BB
1677 lwb_t *lwb;
1678
9babb374
BB
1679 /*
1680 * We don't zero out zl_destroy_txg, so make sure we don't try
1681 * to destroy it twice.
1682 */
1683 if (spa_sync_pass(spa) != 1)
1684 return;
1685
34dc7c2f
BB
1686 mutex_enter(&zilog->zl_lock);
1687
1688 ASSERT(zilog->zl_stop_sync == 0);
1689
428870ff
BB
1690 if (*replayed_seq != 0) {
1691 ASSERT(zh->zh_replay_seq < *replayed_seq);
1692 zh->zh_replay_seq = *replayed_seq;
1693 *replayed_seq = 0;
1694 }
34dc7c2f
BB
1695
1696 if (zilog->zl_destroy_txg == txg) {
1697 blkptr_t blk = zh->zh_log;
1698
1699 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
34dc7c2f
BB
1700
1701 bzero(zh, sizeof (zil_header_t));
fb5f0bc8 1702 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
34dc7c2f
BB
1703
1704 if (zilog->zl_keep_first) {
1705 /*
1706 * If this block was part of log chain that couldn't
1707 * be claimed because a device was missing during
1708 * zil_claim(), but that device later returns,
1709 * then this block could erroneously appear valid.
1710 * To guard against this, assign a new GUID to the new
1711 * log chain so it doesn't matter what blk points to.
1712 */
1713 zil_init_log_chain(zilog, &blk);
1714 zh->zh_log = blk;
1715 }
1716 }
1717
9babb374 1718 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
34dc7c2f
BB
1719 zh->zh_log = lwb->lwb_blk;
1720 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1721 break;
920dd524
ED
1722
1723 ASSERT(lwb->lwb_zio == NULL);
1724
34dc7c2f 1725 list_remove(&zilog->zl_lwb_list, lwb);
428870ff 1726 zio_free_zil(spa, txg, &lwb->lwb_blk);
34dc7c2f
BB
1727 kmem_cache_free(zil_lwb_cache, lwb);
1728
1729 /*
1730 * If we don't have anything left in the lwb list then
1731 * we've had an allocation failure and we need to zero
1732 * out the zil_header blkptr so that we don't end
1733 * up freeing the same block twice.
1734 */
1735 if (list_head(&zilog->zl_lwb_list) == NULL)
1736 BP_ZERO(&zh->zh_log);
1737 }
920dd524
ED
1738
1739 /*
1740 * Remove fastwrite on any blocks that have been pre-allocated for
1741 * the next commit. This prevents fastwrite counter pollution by
1742 * unused, long-lived LWBs.
1743 */
1744 for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
1745 if (lwb->lwb_fastwrite && !lwb->lwb_zio) {
1746 metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
1747 lwb->lwb_fastwrite = 0;
1748 }
1749 }
1750
34dc7c2f
BB
1751 mutex_exit(&zilog->zl_lock);
1752}
1753
1754void
1755zil_init(void)
1756{
1757 zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1758 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
b6ad9671
ED
1759
1760 zil_ksp = kstat_create("zfs", 0, "zil", "misc",
d1d7e268 1761 KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
b6ad9671
ED
1762 KSTAT_FLAG_VIRTUAL);
1763
1764 if (zil_ksp != NULL) {
1765 zil_ksp->ks_data = &zil_stats;
1766 kstat_install(zil_ksp);
1767 }
34dc7c2f
BB
1768}
1769
1770void
1771zil_fini(void)
1772{
1773 kmem_cache_destroy(zil_lwb_cache);
b6ad9671
ED
1774
1775 if (zil_ksp != NULL) {
1776 kstat_delete(zil_ksp);
1777 zil_ksp = NULL;
1778 }
34dc7c2f
BB
1779}
1780
428870ff
BB
1781void
1782zil_set_sync(zilog_t *zilog, uint64_t sync)
1783{
1784 zilog->zl_sync = sync;
1785}
1786
1787void
1788zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1789{
1790 zilog->zl_logbias = logbias;
1791}
1792
34dc7c2f
BB
1793zilog_t *
1794zil_alloc(objset_t *os, zil_header_t *zh_phys)
1795{
1796 zilog_t *zilog;
d6320ddb 1797 int i;
34dc7c2f 1798
79c76d5b 1799 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
34dc7c2f
BB
1800
1801 zilog->zl_header = zh_phys;
1802 zilog->zl_os = os;
1803 zilog->zl_spa = dmu_objset_spa(os);
1804 zilog->zl_dmu_pool = dmu_objset_pool(os);
1805 zilog->zl_destroy_txg = TXG_INITIAL - 1;
428870ff
BB
1806 zilog->zl_logbias = dmu_objset_logbias(os);
1807 zilog->zl_sync = dmu_objset_syncprop(os);
572e2857 1808 zilog->zl_next_batch = 1;
34dc7c2f
BB
1809
1810 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1811
d6320ddb 1812 for (i = 0; i < TXG_SIZE; i++) {
572e2857
BB
1813 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1814 MUTEX_DEFAULT, NULL);
1815 }
34dc7c2f
BB
1816
1817 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1818 offsetof(lwb_t, lwb_node));
1819
572e2857
BB
1820 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1821 offsetof(itx_t, itx_node));
1822
34dc7c2f
BB
1823 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1824
1825 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1826 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1827
1828 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1829 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
572e2857
BB
1830 cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1831 cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
34dc7c2f
BB
1832
1833 return (zilog);
1834}
1835
1836void
1837zil_free(zilog_t *zilog)
1838{
d6320ddb 1839 int i;
34dc7c2f
BB
1840
1841 zilog->zl_stop_sync = 1;
1842
13fe0198
MA
1843 ASSERT0(zilog->zl_suspend);
1844 ASSERT0(zilog->zl_suspending);
1845
3e31d2b0 1846 ASSERT(list_is_empty(&zilog->zl_lwb_list));
34dc7c2f
BB
1847 list_destroy(&zilog->zl_lwb_list);
1848
1849 avl_destroy(&zilog->zl_vdev_tree);
1850 mutex_destroy(&zilog->zl_vdev_lock);
1851
572e2857
BB
1852 ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1853 list_destroy(&zilog->zl_itx_commit_list);
1854
d6320ddb 1855 for (i = 0; i < TXG_SIZE; i++) {
572e2857
BB
1856 /*
1857 * It's possible for an itx to be generated that doesn't dirty
1858 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1859 * callback to remove the entry. We remove those here.
1860 *
1861 * Also free up the ziltest itxs.
1862 */
1863 if (zilog->zl_itxg[i].itxg_itxs)
1864 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1865 mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1866 }
1867
34dc7c2f
BB
1868 mutex_destroy(&zilog->zl_lock);
1869
1870 cv_destroy(&zilog->zl_cv_writer);
1871 cv_destroy(&zilog->zl_cv_suspend);
572e2857
BB
1872 cv_destroy(&zilog->zl_cv_batch[0]);
1873 cv_destroy(&zilog->zl_cv_batch[1]);
34dc7c2f
BB
1874
1875 kmem_free(zilog, sizeof (zilog_t));
1876}
1877
34dc7c2f
BB
1878/*
1879 * Open an intent log.
1880 */
1881zilog_t *
1882zil_open(objset_t *os, zil_get_data_t *get_data)
1883{
1884 zilog_t *zilog = dmu_objset_zil(os);
1885
3e31d2b0
ES
1886 ASSERT(zilog->zl_clean_taskq == NULL);
1887 ASSERT(zilog->zl_get_data == NULL);
1888 ASSERT(list_is_empty(&zilog->zl_lwb_list));
1889
34dc7c2f 1890 zilog->zl_get_data = get_data;
1229323d 1891 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, defclsyspri,
34dc7c2f
BB
1892 2, 2, TASKQ_PREPOPULATE);
1893
1894 return (zilog);
1895}
1896
1897/*
1898 * Close an intent log.
1899 */
1900void
1901zil_close(zilog_t *zilog)
1902{
3e31d2b0 1903 lwb_t *lwb;
572e2857
BB
1904 uint64_t txg = 0;
1905
1906 zil_commit(zilog, 0); /* commit all itx */
1907
34dc7c2f 1908 /*
572e2857
BB
1909 * The lwb_max_txg for the stubby lwb will reflect the last activity
1910 * for the zil. After a txg_wait_synced() on the txg we know all the
1911 * callbacks have occurred that may clean the zil. Only then can we
1912 * destroy the zl_clean_taskq.
34dc7c2f 1913 */
572e2857 1914 mutex_enter(&zilog->zl_lock);
3e31d2b0
ES
1915 lwb = list_tail(&zilog->zl_lwb_list);
1916 if (lwb != NULL)
1917 txg = lwb->lwb_max_txg;
572e2857
BB
1918 mutex_exit(&zilog->zl_lock);
1919 if (txg)
34dc7c2f 1920 txg_wait_synced(zilog->zl_dmu_pool, txg);
29809a6c 1921 ASSERT(!zilog_is_dirty(zilog));
34dc7c2f
BB
1922
1923 taskq_destroy(zilog->zl_clean_taskq);
1924 zilog->zl_clean_taskq = NULL;
1925 zilog->zl_get_data = NULL;
3e31d2b0
ES
1926
1927 /*
1928 * We should have only one LWB left on the list; remove it now.
1929 */
1930 mutex_enter(&zilog->zl_lock);
1931 lwb = list_head(&zilog->zl_lwb_list);
1932 if (lwb != NULL) {
1933 ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
920dd524
ED
1934 ASSERT(lwb->lwb_zio == NULL);
1935 if (lwb->lwb_fastwrite)
1936 metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
3e31d2b0
ES
1937 list_remove(&zilog->zl_lwb_list, lwb);
1938 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1939 kmem_cache_free(zil_lwb_cache, lwb);
1940 }
1941 mutex_exit(&zilog->zl_lock);
34dc7c2f
BB
1942}
1943
13fe0198
MA
1944static char *suspend_tag = "zil suspending";
1945
34dc7c2f
BB
1946/*
1947 * Suspend an intent log. While in suspended mode, we still honor
1948 * synchronous semantics, but we rely on txg_wait_synced() to do it.
13fe0198
MA
1949 * On old version pools, we suspend the log briefly when taking a
1950 * snapshot so that it will have an empty intent log.
1951 *
1952 * Long holds are not really intended to be used the way we do here --
1953 * held for such a short time. A concurrent caller of dsl_dataset_long_held()
1954 * could fail. Therefore we take pains to only put a long hold if it is
1955 * actually necessary. Fortunately, it will only be necessary if the
1956 * objset is currently mounted (or the ZVOL equivalent). In that case it
1957 * will already have a long hold, so we are not really making things any worse.
1958 *
1959 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
1960 * zvol_state_t), and use their mechanism to prevent their hold from being
1961 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for
1962 * very little gain.
1963 *
 1964 * If cookiep == NULL, this does both the suspend & resume.
 1965 * Otherwise, it returns with the dataset "long held", and the cookie
 1966 * should be passed into zil_resume() (see the usage sketch below).
34dc7c2f
BB
1967 */
1968int
13fe0198 1969zil_suspend(const char *osname, void **cookiep)
34dc7c2f 1970{
13fe0198
MA
1971 objset_t *os;
1972 zilog_t *zilog;
1973 const zil_header_t *zh;
1974 int error;
1975
1976 error = dmu_objset_hold(osname, suspend_tag, &os);
1977 if (error != 0)
1978 return (error);
1979 zilog = dmu_objset_zil(os);
34dc7c2f
BB
1980
1981 mutex_enter(&zilog->zl_lock);
13fe0198
MA
1982 zh = zilog->zl_header;
1983
9babb374 1984 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
34dc7c2f 1985 mutex_exit(&zilog->zl_lock);
13fe0198 1986 dmu_objset_rele(os, suspend_tag);
2e528b49 1987 return (SET_ERROR(EBUSY));
34dc7c2f 1988 }
13fe0198
MA
1989
1990 /*
1991 * Don't put a long hold in the cases where we can avoid it. This
1992 * is when there is no cookie so we are doing a suspend & resume
1993 * (i.e. called from zil_vdev_offline()), and there's nothing to do
1994 * for the suspend because it's already suspended, or there's no ZIL.
1995 */
1996 if (cookiep == NULL && !zilog->zl_suspending &&
1997 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1998 mutex_exit(&zilog->zl_lock);
1999 dmu_objset_rele(os, suspend_tag);
2000 return (0);
2001 }
2002
2003 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
2004 dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
2005
2006 zilog->zl_suspend++;
2007
2008 if (zilog->zl_suspend > 1) {
34dc7c2f 2009 /*
13fe0198 2010 * Someone else is already suspending it.
34dc7c2f
BB
2011 * Just wait for them to finish.
2012 */
13fe0198 2013
34dc7c2f
BB
2014 while (zilog->zl_suspending)
2015 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
34dc7c2f 2016 mutex_exit(&zilog->zl_lock);
13fe0198
MA
2017
2018 if (cookiep == NULL)
2019 zil_resume(os);
2020 else
2021 *cookiep = os;
2022 return (0);
2023 }
2024
2025 /*
2026 * If there is no pointer to an on-disk block, this ZIL must not
2027 * be active (e.g. filesystem not mounted), so there's nothing
2028 * to clean up.
2029 */
2030 if (BP_IS_HOLE(&zh->zh_log)) {
2031 ASSERT(cookiep != NULL); /* fast path already handled */
2032
2033 *cookiep = os;
2034 mutex_exit(&zilog->zl_lock);
34dc7c2f
BB
2035 return (0);
2036 }
13fe0198 2037
34dc7c2f
BB
2038 zilog->zl_suspending = B_TRUE;
2039 mutex_exit(&zilog->zl_lock);
2040
572e2857 2041 zil_commit(zilog, 0);
34dc7c2f
BB
2042
2043 zil_destroy(zilog, B_FALSE);
2044
2045 mutex_enter(&zilog->zl_lock);
2046 zilog->zl_suspending = B_FALSE;
2047 cv_broadcast(&zilog->zl_cv_suspend);
2048 mutex_exit(&zilog->zl_lock);
2049
13fe0198
MA
2050 if (cookiep == NULL)
2051 zil_resume(os);
2052 else
2053 *cookiep = os;
34dc7c2f
BB
2054 return (0);
2055}
2056
2057void
13fe0198 2058zil_resume(void *cookie)
34dc7c2f 2059{
13fe0198
MA
2060 objset_t *os = cookie;
2061 zilog_t *zilog = dmu_objset_zil(os);
2062
34dc7c2f
BB
2063 mutex_enter(&zilog->zl_lock);
2064 ASSERT(zilog->zl_suspend != 0);
2065 zilog->zl_suspend--;
2066 mutex_exit(&zilog->zl_lock);
13fe0198
MA
2067 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
2068 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
34dc7c2f
BB
2069}
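
/*
 * Illustrative calling pattern for the cookie form of zil_suspend() /
 * zil_resume() described above. This is a sketch only, not part of this
 * file; "pool/fs" and example_suspend_resume() are hypothetical.
 */
static int
example_suspend_resume(void)
{
	void *cookie;
	int error;

	/* On success, returns with the dataset long held and the ZIL suspended. */
	error = zil_suspend("pool/fs", &cookie);
	if (error != 0)
		return (error);

	/* ... operate on the quiesced dataset here ... */

	/* Drops the suspend count and the long hold taken by zil_suspend(). */
	zil_resume(cookie);
	return (0);
}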
2070
2071typedef struct zil_replay_arg {
b01615d5 2072 zil_replay_func_t *zr_replay;
34dc7c2f 2073 void *zr_arg;
34dc7c2f 2074 boolean_t zr_byteswap;
428870ff 2075 char *zr_lr;
34dc7c2f
BB
2076} zil_replay_arg_t;
2077
428870ff
BB
2078static int
2079zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
2080{
2081 char name[MAXNAMELEN];
2082
2083 zilog->zl_replaying_seq--; /* didn't actually replay this one */
2084
2085 dmu_objset_name(zilog->zl_os, name);
2086
2087 cmn_err(CE_WARN, "ZFS replay transaction error %d, "
2088 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
2089 (u_longlong_t)lr->lrc_seq,
2090 (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
2091 (lr->lrc_txtype & TX_CI) ? "CI" : "");
2092
2093 return (error);
2094}
2095
2096static int
34dc7c2f
BB
2097zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
2098{
2099 zil_replay_arg_t *zr = zra;
2100 const zil_header_t *zh = zilog->zl_header;
2101 uint64_t reclen = lr->lrc_reclen;
2102 uint64_t txtype = lr->lrc_txtype;
428870ff 2103 int error = 0;
34dc7c2f 2104
428870ff 2105 zilog->zl_replaying_seq = lr->lrc_seq;
34dc7c2f
BB
2106
2107 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
428870ff
BB
2108 return (0);
2109
2110 if (lr->lrc_txg < claim_txg) /* already committed */
2111 return (0);
34dc7c2f
BB
2112
2113 /* Strip case-insensitive bit, still present in log record */
2114 txtype &= ~TX_CI;
2115
428870ff
BB
2116 if (txtype == 0 || txtype >= TX_MAX_TYPE)
2117 return (zil_replay_error(zilog, lr, EINVAL));
2118
2119 /*
2120 * If this record type can be logged out of order, the object
2121 * (lr_foid) may no longer exist. That's legitimate, not an error.
2122 */
2123 if (TX_OOO(txtype)) {
2124 error = dmu_object_info(zilog->zl_os,
2125 ((lr_ooo_t *)lr)->lr_foid, NULL);
2126 if (error == ENOENT || error == EEXIST)
2127 return (0);
fb5f0bc8
BB
2128 }
2129
34dc7c2f
BB
2130 /*
2131 * Make a copy of the data so we can revise and extend it.
2132 */
428870ff
BB
2133 bcopy(lr, zr->zr_lr, reclen);
2134
2135 /*
2136 * If this is a TX_WRITE with a blkptr, suck in the data.
2137 */
2138 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
2139 error = zil_read_log_data(zilog, (lr_write_t *)lr,
2140 zr->zr_lr + reclen);
13fe0198 2141 if (error != 0)
428870ff
BB
2142 return (zil_replay_error(zilog, lr, error));
2143 }
34dc7c2f
BB
2144
2145 /*
2146 * The log block containing this lr may have been byteswapped
2147 * so that we can easily examine common fields like lrc_txtype.
428870ff 2148 * However, the log is a mix of different record types, and only the
34dc7c2f
BB
2149 * replay vectors know how to byteswap their records. Therefore, if
2150 * the lr was byteswapped, undo it before invoking the replay vector.
2151 */
2152 if (zr->zr_byteswap)
428870ff 2153 byteswap_uint64_array(zr->zr_lr, reclen);
34dc7c2f
BB
2154
2155 /*
2156 * We must now do two things atomically: replay this log record,
fb5f0bc8
BB
2157 * and update the log header sequence number to reflect the fact that
2158 * we did so. At the end of each replay function the sequence number
2159 * is updated if we are in replay mode.
34dc7c2f 2160 */
428870ff 2161 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
13fe0198 2162 if (error != 0) {
34dc7c2f
BB
2163 /*
2164 * The DMU's dnode layer doesn't see removes until the txg
2165 * commits, so a subsequent claim can spuriously fail with
fb5f0bc8 2166 * EEXIST. So if we receive any error, we try syncing out
428870ff
BB
 2167 * any removes and then retry the transaction. Note that we
2168 * specify B_FALSE for byteswap now, so we don't do it twice.
34dc7c2f 2169 */
428870ff
BB
2170 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2171 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
13fe0198 2172 if (error != 0)
428870ff 2173 return (zil_replay_error(zilog, lr, error));
34dc7c2f 2174 }
428870ff 2175 return (0);
34dc7c2f
BB
2176}
2177
2178/* ARGSUSED */
428870ff 2179static int
34dc7c2f
BB
2180zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2181{
2182 zilog->zl_replay_blks++;
428870ff
BB
2183
2184 return (0);
34dc7c2f
BB
2185}
2186
2187/*
2188 * If this dataset has a non-empty intent log, replay it and destroy it.
2189 */
2190void
b01615d5 2191zil_replay(objset_t *os, void *arg, zil_replay_func_t replay_func[TX_MAX_TYPE])
34dc7c2f
BB
2192{
2193 zilog_t *zilog = dmu_objset_zil(os);
2194 const zil_header_t *zh = zilog->zl_header;
2195 zil_replay_arg_t zr;
2196
9babb374 2197 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
34dc7c2f
BB
2198 zil_destroy(zilog, B_TRUE);
2199 return;
2200 }
2201
34dc7c2f
BB
2202 zr.zr_replay = replay_func;
2203 zr.zr_arg = arg;
34dc7c2f 2204 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
79c76d5b 2205 zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
34dc7c2f
BB
2206
2207 /*
2208 * Wait for in-progress removes to sync before starting replay.
2209 */
2210 txg_wait_synced(zilog->zl_dmu_pool, 0);
2211
fb5f0bc8 2212 zilog->zl_replay = B_TRUE;
428870ff 2213 zilog->zl_replay_time = ddi_get_lbolt();
34dc7c2f
BB
2214 ASSERT(zilog->zl_replay_blks == 0);
2215 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2216 zh->zh_claim_txg);
00b46022 2217 vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
34dc7c2f
BB
2218
2219 zil_destroy(zilog, B_FALSE);
2220 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
fb5f0bc8 2221 zilog->zl_replay = B_FALSE;
34dc7c2f
BB
2222}
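
/*
 * Illustrative sketch of how a consumer wires replay vectors into
 * zil_replay(). Not part of this file; the real tables live in the
 * ZPL/ZVOL code and the exact zil_replay_func_t prototype is defined in
 * sys/zil.h. The callback shape assumed here matches the call site in
 * zil_replay_log_record() above: zr_replay[txtype](arg, lr, byteswap).
 * All example_* names are hypothetical.
 */
/* ARGSUSED */
static int
example_replay_noop(void *arg, char *lr, boolean_t byteswap)
{
	/* A real vector would byteswap the record if asked, then apply it. */
	return (0);
}

/* One slot per TX_* type, indexed by the record's lrc_txtype. */
static zil_replay_func_t example_replay_vector[TX_MAX_TYPE];

static void
example_mount_replay(objset_t *os, void *arg)
{
	example_replay_vector[TX_CREATE] = example_replay_noop;
	/* ... fill in the remaining TX_* slots ... */

	/* Replays any unplayed records for this objset, then destroys the log. */
	zil_replay(os, arg, example_replay_vector);
}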
2223
428870ff
BB
2224boolean_t
2225zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
34dc7c2f 2226{
428870ff
BB
2227 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2228 return (B_TRUE);
34dc7c2f 2229
428870ff
BB
2230 if (zilog->zl_replay) {
2231 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2232 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2233 zilog->zl_replaying_seq;
2234 return (B_TRUE);
34dc7c2f
BB
2235 }
2236
428870ff 2237 return (B_FALSE);
34dc7c2f 2238}
9babb374
BB
2239
2240/* ARGSUSED */
2241int
428870ff 2242zil_vdev_offline(const char *osname, void *arg)
9babb374 2243{
9babb374
BB
2244 int error;
2245
13fe0198
MA
2246 error = zil_suspend(osname, NULL);
2247 if (error != 0)
2e528b49 2248 return (SET_ERROR(EEXIST));
13fe0198 2249 return (0);
9babb374 2250}
c409e464
BB
2251
2252#if defined(_KERNEL) && defined(HAVE_SPL)
0f699108
AZ
2253EXPORT_SYMBOL(zil_alloc);
2254EXPORT_SYMBOL(zil_free);
2255EXPORT_SYMBOL(zil_open);
2256EXPORT_SYMBOL(zil_close);
2257EXPORT_SYMBOL(zil_replay);
2258EXPORT_SYMBOL(zil_replaying);
2259EXPORT_SYMBOL(zil_destroy);
2260EXPORT_SYMBOL(zil_destroy_sync);
2261EXPORT_SYMBOL(zil_itx_create);
2262EXPORT_SYMBOL(zil_itx_destroy);
2263EXPORT_SYMBOL(zil_itx_assign);
2264EXPORT_SYMBOL(zil_commit);
2265EXPORT_SYMBOL(zil_vdev_offline);
2266EXPORT_SYMBOL(zil_claim);
2267EXPORT_SYMBOL(zil_check_log_chain);
2268EXPORT_SYMBOL(zil_sync);
2269EXPORT_SYMBOL(zil_clean);
2270EXPORT_SYMBOL(zil_suspend);
2271EXPORT_SYMBOL(zil_resume);
2272EXPORT_SYMBOL(zil_add_block);
2273EXPORT_SYMBOL(zil_bp_tree_add);
2274EXPORT_SYMBOL(zil_set_sync);
2275EXPORT_SYMBOL(zil_set_logbias);
2276
c409e464
BB
2277module_param(zil_replay_disable, int, 0644);
2278MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
2279
2280module_param(zfs_nocacheflush, int, 0644);
2281MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
ee191e80
ED
2282
2283module_param(zil_slog_limit, ulong, 0644);
2284MODULE_PARM_DESC(zil_slog_limit, "Max commit bytes to separate log device");
c409e464 2285#endif