/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zil.h>
/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of a ZIL header, ZIL blocks, and ZIL records.
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available.
 */
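/*
 * (Added illustration, not part of the original comment.)  Roughly:
 *
 *	zil_header_t          ZIL block                   ZIL block
 *	[ zh_log ] ----> [ lr | lr | ... | blkptr ] ----> [ lr | ... | blkptr ] ----> ...
 *
 * where each blkptr names the next log block in the chain and the final
 * block's next pointer is a hole, marking the end of the chain.
 */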
/*
 * See zil.h for more information about these fields.
 */
zil_stats_t zil_stats = {
	{ "zil_commit_count",			KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_count",			KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count",		KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes",		KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count",	KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes",	KSTAT_DATA_UINT64 },
};

static kstat_t *zil_ksp;
/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
int zfs_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
 */
unsigned long zil_slog_bulk = 768 * 1024;

static kmem_cache_t *zil_lwb_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
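/*
 * (Added note) LWB_EMPTY() below is true when lwb_nused still accounts only
 * for the zil_chain_t header, i.e. no log records have been copied into the
 * lwb buffer yet.
 */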
#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
	sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = AVL_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (cmp != 0)
		return (cmp);

	return (AVL_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}
static int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
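/*
 * (Added note) The GUID, objset id, and sequence number stored in the block
 * checksum words above carry the identity of the log chain; zil_read_log_block()
 * uses them to verify chain linkage, expecting the next block's checksum to be
 * this block's checksum with ZIL_ZC_SEQ incremented by one.
 */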
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, void *dst, char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    &abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}
272 * Read a TX_WRITE log data block.
275 zil_read_log_data(zilog_t
*zilog
, const lr_write_t
*lr
, void *wbuf
)
277 enum zio_flag zio_flags
= ZIO_FLAG_CANFAIL
;
278 const blkptr_t
*bp
= &lr
->lr_blkptr
;
279 arc_flags_t aflags
= ARC_FLAG_WAIT
;
280 arc_buf_t
*abuf
= NULL
;
284 if (BP_IS_HOLE(bp
)) {
286 bzero(wbuf
, MAX(BP_GET_LSIZE(bp
), lr
->lr_length
));
290 if (zilog
->zl_header
->zh_claim_txg
== 0)
291 zio_flags
|= ZIO_FLAG_SPECULATIVE
| ZIO_FLAG_SCRUB
;
294 * If we are not using the resulting data, we are just checking that
295 * it hasn't been corrupted so we don't need to waste CPU time
296 * decompressing and decrypting it.
299 zio_flags
|= ZIO_FLAG_RAW
;
301 SET_BOOKMARK(&zb
, dmu_objset_id(zilog
->zl_os
), lr
->lr_foid
,
302 ZB_ZIL_LEVEL
, lr
->lr_offset
/ BP_GET_LSIZE(bp
));
304 error
= arc_read(NULL
, zilog
->zl_spa
, bp
, arc_getbuf_func
, &abuf
,
305 ZIO_PRIORITY_SYNC_READ
, zio_flags
, &aflags
, &zb
);
309 bcopy(abuf
->b_data
, wbuf
, arc_buf_size(abuf
));
310 arc_buf_destroy(abuf
, &abuf
);
317 * Parse the intent log, and call parse_func for each valid record within.
320 zil_parse(zilog_t
*zilog
, zil_parse_blk_func_t
*parse_blk_func
,
321 zil_parse_lr_func_t
*parse_lr_func
, void *arg
, uint64_t txg
,
324 const zil_header_t
*zh
= zilog
->zl_header
;
325 boolean_t claimed
= !!zh
->zh_claim_txg
;
326 uint64_t claim_blk_seq
= claimed
? zh
->zh_claim_blk_seq
: UINT64_MAX
;
327 uint64_t claim_lr_seq
= claimed
? zh
->zh_claim_lr_seq
: UINT64_MAX
;
328 uint64_t max_blk_seq
= 0;
329 uint64_t max_lr_seq
= 0;
330 uint64_t blk_count
= 0;
331 uint64_t lr_count
= 0;
332 blkptr_t blk
, next_blk
;
336 bzero(&next_blk
, sizeof (blkptr_t
));
339 * Old logs didn't record the maximum zh_claim_lr_seq.
341 if (!(zh
->zh_flags
& ZIL_CLAIM_LR_SEQ_VALID
))
342 claim_lr_seq
= UINT64_MAX
;
345 * Starting at the block pointed to by zh_log we read the log chain.
346 * For each block in the chain we strongly check that block to
347 * ensure its validity. We stop when an invalid block is found.
348 * For each block pointer in the chain we call parse_blk_func().
349 * For each record in each valid block we call parse_lr_func().
350 * If the log has been claimed, stop if we encounter a sequence
351 * number greater than the highest claimed sequence number.
353 lrbuf
= zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE
);
354 zil_bp_tree_init(zilog
);
356 for (blk
= zh
->zh_log
; !BP_IS_HOLE(&blk
); blk
= next_blk
) {
357 uint64_t blk_seq
= blk
.blk_cksum
.zc_word
[ZIL_ZC_SEQ
];
361 if (blk_seq
> claim_blk_seq
)
364 error
= parse_blk_func(zilog
, &blk
, arg
, txg
);
367 ASSERT3U(max_blk_seq
, <, blk_seq
);
368 max_blk_seq
= blk_seq
;
371 if (max_lr_seq
== claim_lr_seq
&& max_blk_seq
== claim_blk_seq
)
374 error
= zil_read_log_block(zilog
, decrypt
, &blk
, &next_blk
,
379 for (lrp
= lrbuf
; lrp
< end
; lrp
+= reclen
) {
380 lr_t
*lr
= (lr_t
*)lrp
;
381 reclen
= lr
->lrc_reclen
;
382 ASSERT3U(reclen
, >=, sizeof (lr_t
));
383 if (lr
->lrc_seq
> claim_lr_seq
)
386 error
= parse_lr_func(zilog
, lr
, arg
, txg
);
389 ASSERT3U(max_lr_seq
, <, lr
->lrc_seq
);
390 max_lr_seq
= lr
->lrc_seq
;
395 zilog
->zl_parse_error
= error
;
396 zilog
->zl_parse_blk_seq
= max_blk_seq
;
397 zilog
->zl_parse_lr_seq
= max_lr_seq
;
398 zilog
->zl_parse_blk_count
= blk_count
;
399 zilog
->zl_parse_lr_count
= lr_count
;
401 ASSERT(!claimed
|| !(zh
->zh_flags
& ZIL_CLAIM_LR_SEQ_VALID
) ||
402 (max_blk_seq
== claim_blk_seq
&& max_lr_seq
== claim_lr_seq
) ||
403 (decrypt
&& error
== EIO
));
405 zil_bp_tree_fini(zilog
);
406 zio_buf_free(lrbuf
, SPA_OLD_MAXBLOCKSIZE
);
412 zil_claim_log_block(zilog_t
*zilog
, blkptr_t
*bp
, void *tx
, uint64_t first_txg
)
415 * Claim log block if not already committed and not already claimed.
416 * If tx == NULL, just verify that the block is claimable.
418 if (BP_IS_HOLE(bp
) || bp
->blk_birth
< first_txg
||
419 zil_bp_tree_add(zilog
, bp
) != 0)
422 return (zio_wait(zio_claim(NULL
, zilog
->zl_spa
,
423 tx
== NULL
? 0 : first_txg
, bp
, spa_claim_notify
, NULL
,
424 ZIO_FLAG_CANFAIL
| ZIO_FLAG_SPECULATIVE
| ZIO_FLAG_SCRUB
)));
428 zil_claim_log_record(zilog_t
*zilog
, lr_t
*lrc
, void *tx
, uint64_t first_txg
)
430 lr_write_t
*lr
= (lr_write_t
*)lrc
;
433 if (lrc
->lrc_txtype
!= TX_WRITE
)
437 * If the block is not readable, don't claim it. This can happen
438 * in normal operation when a log block is written to disk before
439 * some of the dmu_sync() blocks it points to. In this case, the
440 * transaction cannot have been committed to anyone (we would have
441 * waited for all writes to be stable first), so it is semantically
442 * correct to declare this the end of the log.
444 if (lr
->lr_blkptr
.blk_birth
>= first_txg
) {
445 error
= zil_read_log_data(zilog
, lr
, NULL
);
450 return (zil_claim_log_block(zilog
, &lr
->lr_blkptr
, tx
, first_txg
));
455 zil_free_log_block(zilog_t
*zilog
, blkptr_t
*bp
, void *tx
, uint64_t claim_txg
)
457 zio_free_zil(zilog
->zl_spa
, dmu_tx_get_txg(tx
), bp
);
463 zil_free_log_record(zilog_t
*zilog
, lr_t
*lrc
, void *tx
, uint64_t claim_txg
)
465 lr_write_t
*lr
= (lr_write_t
*)lrc
;
466 blkptr_t
*bp
= &lr
->lr_blkptr
;
469 * If we previously claimed it, we need to free it.
471 if (claim_txg
!= 0 && lrc
->lrc_txtype
== TX_WRITE
&&
472 bp
->blk_birth
>= claim_txg
&& zil_bp_tree_add(zilog
, bp
) == 0 &&
474 zio_free(zilog
->zl_spa
, dmu_tx_get_txg(tx
), bp
);
480 zil_alloc_lwb(zilog_t
*zilog
, blkptr_t
*bp
, boolean_t slog
, uint64_t txg
,
485 lwb
= kmem_cache_alloc(zil_lwb_cache
, KM_SLEEP
);
486 lwb
->lwb_zilog
= zilog
;
488 lwb
->lwb_fastwrite
= fastwrite
;
489 lwb
->lwb_slog
= slog
;
490 lwb
->lwb_buf
= zio_buf_alloc(BP_GET_LSIZE(bp
));
491 lwb
->lwb_max_txg
= txg
;
494 if (BP_GET_CHECKSUM(bp
) == ZIO_CHECKSUM_ZILOG2
) {
495 lwb
->lwb_nused
= sizeof (zil_chain_t
);
496 lwb
->lwb_sz
= BP_GET_LSIZE(bp
);
499 lwb
->lwb_sz
= BP_GET_LSIZE(bp
) - sizeof (zil_chain_t
);
502 mutex_enter(&zilog
->zl_lock
);
503 list_insert_tail(&zilog
->zl_lwb_list
, lwb
);
504 mutex_exit(&zilog
->zl_lock
);
510 * Called when we create in-memory log transactions so that we know
511 * to clean up the itxs at the end of spa_sync().
514 zilog_dirty(zilog_t
*zilog
, uint64_t txg
)
516 dsl_pool_t
*dp
= zilog
->zl_dmu_pool
;
517 dsl_dataset_t
*ds
= dmu_objset_ds(zilog
->zl_os
);
519 if (ds
->ds_is_snapshot
)
520 panic("dirtying snapshot!");
522 if (txg_list_add(&dp
->dp_dirty_zilogs
, zilog
, txg
)) {
523 /* up the hold count until we can be written out */
524 dmu_buf_add_ref(ds
->ds_dbuf
, zilog
);
529 * Determine if the zil is dirty in the specified txg. Callers wanting to
530 * ensure that the dirty state does not change must hold the itxg_lock for
531 * the specified txg. Holding the lock will ensure that the zil cannot be
532 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current state.
536 zilog_is_dirty_in_txg(zilog_t
*zilog
, uint64_t txg
)
538 dsl_pool_t
*dp
= zilog
->zl_dmu_pool
;
540 if (txg_list_member(&dp
->dp_dirty_zilogs
, zilog
, txg
& TXG_MASK
))
546 * Determine if the zil is dirty. The zil is considered dirty if it has
547 * any pending itx records that have not been cleaned by zil_clean().
550 zilog_is_dirty(zilog_t
*zilog
)
552 dsl_pool_t
*dp
= zilog
->zl_dmu_pool
;
554 for (int t
= 0; t
< TXG_SIZE
; t
++) {
555 if (txg_list_member(&dp
->dp_dirty_zilogs
, zilog
, t
))
562 * Create an on-disk intent log.
565 zil_create(zilog_t
*zilog
)
567 const zil_header_t
*zh
= zilog
->zl_header
;
573 boolean_t fastwrite
= FALSE
;
574 boolean_t slog
= FALSE
;
577 * Wait for any previous destroy to complete.
579 txg_wait_synced(zilog
->zl_dmu_pool
, zilog
->zl_destroy_txg
);
581 ASSERT(zh
->zh_claim_txg
== 0);
582 ASSERT(zh
->zh_replay_seq
== 0);
587 * Allocate an initial log block if:
588 * - there isn't one already
589 * - the existing block is the wrong endianness
591 if (BP_IS_HOLE(&blk
) || BP_SHOULD_BYTESWAP(&blk
)) {
592 tx
= dmu_tx_create(zilog
->zl_os
);
593 VERIFY(dmu_tx_assign(tx
, TXG_WAIT
) == 0);
594 dsl_dataset_dirty(dmu_objset_ds(zilog
->zl_os
), tx
);
595 txg
= dmu_tx_get_txg(tx
);
597 if (!BP_IS_HOLE(&blk
)) {
598 zio_free_zil(zilog
->zl_spa
, txg
, &blk
);
602 error
= zio_alloc_zil(zilog
->zl_spa
, zilog
->zl_os
, txg
, &blk
,
603 ZIL_MIN_BLKSZ
, &slog
);
607 zil_init_log_chain(zilog
, &blk
);
611 * Allocate a log write buffer (lwb) for the first log block.
614 lwb
= zil_alloc_lwb(zilog
, &blk
, slog
, txg
, fastwrite
);
617 * If we just allocated the first log block, commit our transaction
618 * and wait for zil_sync() to stuff the block pointer into zh_log.
619 * (zh is part of the MOS, so we cannot modify it in open context.)
623 txg_wait_synced(zilog
->zl_dmu_pool
, txg
);
626 ASSERT(bcmp(&blk
, &zh
->zh_log
, sizeof (blk
)) == 0);
632 * In one tx, free all log blocks and clear the log header.
633 * If keep_first is set, then we're replaying a log with no content.
634 * We want to keep the first block, however, so that the first
635 * synchronous transaction doesn't require a txg_wait_synced()
636 * in zil_create(). We don't need to txg_wait_synced() here either
637 * when keep_first is set, because both zil_create() and zil_destroy()
638 * will wait for any in-progress destroys to complete.
641 zil_destroy(zilog_t
*zilog
, boolean_t keep_first
)
643 const zil_header_t
*zh
= zilog
->zl_header
;
649 * Wait for any previous destroy to complete.
651 txg_wait_synced(zilog
->zl_dmu_pool
, zilog
->zl_destroy_txg
);
653 zilog
->zl_old_header
= *zh
; /* debugging aid */
655 if (BP_IS_HOLE(&zh
->zh_log
))
658 tx
= dmu_tx_create(zilog
->zl_os
);
659 VERIFY(dmu_tx_assign(tx
, TXG_WAIT
) == 0);
660 dsl_dataset_dirty(dmu_objset_ds(zilog
->zl_os
), tx
);
661 txg
= dmu_tx_get_txg(tx
);
663 mutex_enter(&zilog
->zl_lock
);
665 ASSERT3U(zilog
->zl_destroy_txg
, <, txg
);
666 zilog
->zl_destroy_txg
= txg
;
667 zilog
->zl_keep_first
= keep_first
;
669 if (!list_is_empty(&zilog
->zl_lwb_list
)) {
670 ASSERT(zh
->zh_claim_txg
== 0);
672 while ((lwb
= list_head(&zilog
->zl_lwb_list
)) != NULL
) {
673 ASSERT(lwb
->lwb_zio
== NULL
);
674 if (lwb
->lwb_fastwrite
)
675 metaslab_fastwrite_unmark(zilog
->zl_spa
,
677 list_remove(&zilog
->zl_lwb_list
, lwb
);
678 if (lwb
->lwb_buf
!= NULL
)
679 zio_buf_free(lwb
->lwb_buf
, lwb
->lwb_sz
);
680 zio_free_zil(zilog
->zl_spa
, txg
, &lwb
->lwb_blk
);
681 kmem_cache_free(zil_lwb_cache
, lwb
);
683 } else if (!keep_first
) {
684 zil_destroy_sync(zilog
, tx
);
686 mutex_exit(&zilog
->zl_lock
);
692 zil_destroy_sync(zilog_t
*zilog
, dmu_tx_t
*tx
)
694 ASSERT(list_is_empty(&zilog
->zl_lwb_list
));
695 (void) zil_parse(zilog
, zil_free_log_block
,
696 zil_free_log_record
, tx
, zilog
->zl_header
->zh_claim_txg
, B_FALSE
);
700 zil_claim(dsl_pool_t
*dp
, dsl_dataset_t
*ds
, void *txarg
)
702 dmu_tx_t
*tx
= txarg
;
703 uint64_t first_txg
= dmu_tx_get_txg(tx
);
709 error
= dmu_objset_own_obj(dp
, ds
->ds_object
,
710 DMU_OST_ANY
, B_FALSE
, B_FALSE
, FTAG
, &os
);
713 * EBUSY indicates that the objset is inconsistent, in which
714 * case it can not have a ZIL.
716 if (error
!= EBUSY
) {
717 cmn_err(CE_WARN
, "can't open objset for %llu, error %u",
718 (unsigned long long)ds
->ds_object
, error
);
724 zilog
= dmu_objset_zil(os
);
725 zh
= zil_header_in_syncing_context(zilog
);
727 if (spa_get_log_state(zilog
->zl_spa
) == SPA_LOG_CLEAR
) {
728 if (!BP_IS_HOLE(&zh
->zh_log
))
729 zio_free_zil(zilog
->zl_spa
, first_txg
, &zh
->zh_log
);
730 BP_ZERO(&zh
->zh_log
);
731 if (os
->os_encrypted
)
732 os
->os_next_write_raw
= B_TRUE
;
733 dsl_dataset_dirty(dmu_objset_ds(os
), tx
);
734 dmu_objset_disown(os
, B_FALSE
, FTAG
);
739 * Claim all log blocks if we haven't already done so, and remember
740 * the highest claimed sequence number. This ensures that if we can
741 * read only part of the log now (e.g. due to a missing device),
742 * but we can read the entire log later, we will not try to replay
743 * or destroy beyond the last block we successfully claimed.
745 ASSERT3U(zh
->zh_claim_txg
, <=, first_txg
);
746 if (zh
->zh_claim_txg
== 0 && !BP_IS_HOLE(&zh
->zh_log
)) {
747 (void) zil_parse(zilog
, zil_claim_log_block
,
748 zil_claim_log_record
, tx
, first_txg
, B_FALSE
);
749 zh
->zh_claim_txg
= first_txg
;
750 zh
->zh_claim_blk_seq
= zilog
->zl_parse_blk_seq
;
751 zh
->zh_claim_lr_seq
= zilog
->zl_parse_lr_seq
;
752 if (zilog
->zl_parse_lr_count
|| zilog
->zl_parse_blk_count
> 1)
753 zh
->zh_flags
|= ZIL_REPLAY_NEEDED
;
754 zh
->zh_flags
|= ZIL_CLAIM_LR_SEQ_VALID
;
755 dsl_dataset_dirty(dmu_objset_ds(os
), tx
);
758 ASSERT3U(first_txg
, ==, (spa_last_synced_txg(zilog
->zl_spa
) + 1));
759 dmu_objset_disown(os
, B_FALSE
, FTAG
);
764 * Check the log by walking the log chain.
765 * Checksum errors are ok as they indicate the end of the chain.
766 * Any other error (no device or read failure) returns an error.
770 zil_check_log_chain(dsl_pool_t
*dp
, dsl_dataset_t
*ds
, void *tx
)
779 error
= dmu_objset_from_ds(ds
, &os
);
781 cmn_err(CE_WARN
, "can't open objset %llu, error %d",
782 (unsigned long long)ds
->ds_object
, error
);
786 zilog
= dmu_objset_zil(os
);
787 bp
= (blkptr_t
*)&zilog
->zl_header
->zh_log
;
790 * Check the first block and determine if it's on a log device
791 * which may have been removed or faulted prior to loading this
792 * pool. If so, there's no point in checking the rest of the log
793 * as its content should have already been synced to the pool.
795 if (!BP_IS_HOLE(bp
)) {
797 boolean_t valid
= B_TRUE
;
799 spa_config_enter(os
->os_spa
, SCL_STATE
, FTAG
, RW_READER
);
800 vd
= vdev_lookup_top(os
->os_spa
, DVA_GET_VDEV(&bp
->blk_dva
[0]));
801 if (vd
->vdev_islog
&& vdev_is_dead(vd
))
802 valid
= vdev_log_state_valid(vd
);
803 spa_config_exit(os
->os_spa
, SCL_STATE
, FTAG
);
810 * Because tx == NULL, zil_claim_log_block() will not actually claim
811 * any blocks, but just determine whether it is possible to do so.
812 * In addition to checking the log chain, zil_claim_log_block()
813 * will invoke zio_claim() with a done func of spa_claim_notify(),
814 * which will update spa_max_claim_txg. See spa_load() for details.
816 error
= zil_parse(zilog
, zil_claim_log_block
, zil_claim_log_record
, tx
,
817 zilog
->zl_header
->zh_claim_txg
? -1ULL : spa_first_txg(os
->os_spa
),
820 return ((error
== ECKSUM
|| error
== ENOENT
) ? 0 : error
);
824 zil_vdev_compare(const void *x1
, const void *x2
)
826 const uint64_t v1
= ((zil_vdev_node_t
*)x1
)->zv_vdev
;
827 const uint64_t v2
= ((zil_vdev_node_t
*)x2
)->zv_vdev
;
829 return (AVL_CMP(v1
, v2
));
833 zil_add_block(zilog_t
*zilog
, const blkptr_t
*bp
)
835 avl_tree_t
*t
= &zilog
->zl_vdev_tree
;
837 zil_vdev_node_t
*zv
, zvsearch
;
838 int ndvas
= BP_GET_NDVAS(bp
);
841 if (zfs_nocacheflush
)
844 ASSERT(zilog
->zl_writer
);
847 * Even though we're zl_writer, we still need a lock because the
848 * zl_get_data() callbacks may have dmu_sync() done callbacks
849 * that will run concurrently.
851 mutex_enter(&zilog
->zl_vdev_lock
);
852 for (i
= 0; i
< ndvas
; i
++) {
853 zvsearch
.zv_vdev
= DVA_GET_VDEV(&bp
->blk_dva
[i
]);
854 if (avl_find(t
, &zvsearch
, &where
) == NULL
) {
855 zv
= kmem_alloc(sizeof (*zv
), KM_SLEEP
);
856 zv
->zv_vdev
= zvsearch
.zv_vdev
;
857 avl_insert(t
, zv
, where
);
860 mutex_exit(&zilog
->zl_vdev_lock
);
864 zil_flush_vdevs(zilog_t
*zilog
)
866 spa_t
*spa
= zilog
->zl_spa
;
867 avl_tree_t
*t
= &zilog
->zl_vdev_tree
;
872 ASSERT(zilog
->zl_writer
);
875 * We don't need zl_vdev_lock here because we're the zl_writer,
876 * and all zl_get_data() callbacks are done.
878 if (avl_numnodes(t
) == 0)
881 spa_config_enter(spa
, SCL_STATE
, FTAG
, RW_READER
);
883 zio
= zio_root(spa
, NULL
, NULL
, ZIO_FLAG_CANFAIL
);
885 while ((zv
= avl_destroy_nodes(t
, &cookie
)) != NULL
) {
886 vdev_t
*vd
= vdev_lookup_top(spa
, zv
->zv_vdev
);
889 kmem_free(zv
, sizeof (*zv
));
893 * Wait for all the flushes to complete. Not all devices actually
894 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
896 (void) zio_wait(zio
);
898 spa_config_exit(spa
, SCL_STATE
, FTAG
);
902 * Function called when a log block write completes
905 zil_lwb_write_done(zio_t
*zio
)
907 lwb_t
*lwb
= zio
->io_private
;
908 zilog_t
*zilog
= lwb
->lwb_zilog
;
909 dmu_tx_t
*tx
= lwb
->lwb_tx
;
911 ASSERT(BP_GET_COMPRESS(zio
->io_bp
) == ZIO_COMPRESS_OFF
);
912 ASSERT(BP_GET_TYPE(zio
->io_bp
) == DMU_OT_INTENT_LOG
);
913 ASSERT(BP_GET_LEVEL(zio
->io_bp
) == 0);
914 ASSERT(BP_GET_BYTEORDER(zio
->io_bp
) == ZFS_HOST_BYTEORDER
);
915 ASSERT(!BP_IS_GANG(zio
->io_bp
));
916 ASSERT(!BP_IS_HOLE(zio
->io_bp
));
917 ASSERT(BP_GET_FILL(zio
->io_bp
) == 0);
920 * Ensure the lwb buffer pointer is cleared before releasing
921 * the txg. If we have had an allocation failure and
922 * the txg is waiting to sync then we want zil_sync()
923 * to remove the lwb so that it's not picked up as the next new
924 * one in zil_commit_writer(). zil_sync() will only remove
925 * the lwb if lwb_buf is null.
927 abd_put(zio
->io_abd
);
928 zio_buf_free(lwb
->lwb_buf
, lwb
->lwb_sz
);
929 mutex_enter(&zilog
->zl_lock
);
931 lwb
->lwb_fastwrite
= FALSE
;
934 mutex_exit(&zilog
->zl_lock
);
937 * Now that we've written this log block, we have a stable pointer
938 * to the next block in the chain, so it's OK to let the txg in
939 * which we allocated the next block sync.
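/*
 * (Added note) This is also why the dmu_tx taken for the next block's
 * allocation is only committed here in the write-done path, rather than in
 * zil_lwb_write_start() where it was assigned.
 */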
945 * Initialize the io for a log block.
948 zil_lwb_write_init(zilog_t
*zilog
, lwb_t
*lwb
)
953 SET_BOOKMARK(&zb
, lwb
->lwb_blk
.blk_cksum
.zc_word
[ZIL_ZC_OBJSET
],
954 ZB_ZIL_OBJECT
, ZB_ZIL_LEVEL
,
955 lwb
->lwb_blk
.blk_cksum
.zc_word
[ZIL_ZC_SEQ
]);
957 if (zilog
->zl_root_zio
== NULL
) {
958 zilog
->zl_root_zio
= zio_root(zilog
->zl_spa
, NULL
, NULL
,
962 /* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
963 mutex_enter(&zilog
->zl_lock
);
964 if (lwb
->lwb_zio
== NULL
) {
965 abd_t
*lwb_abd
= abd_get_from_buf(lwb
->lwb_buf
,
966 BP_GET_LSIZE(&lwb
->lwb_blk
));
967 if (!lwb
->lwb_fastwrite
) {
968 metaslab_fastwrite_mark(zilog
->zl_spa
, &lwb
->lwb_blk
);
969 lwb
->lwb_fastwrite
= 1;
971 if (!lwb
->lwb_slog
|| zilog
->zl_cur_used
<= zil_slog_bulk
)
972 prio
= ZIO_PRIORITY_SYNC_WRITE
;
974 prio
= ZIO_PRIORITY_ASYNC_WRITE
;
975 lwb
->lwb_zio
= zio_rewrite(zilog
->zl_root_zio
, zilog
->zl_spa
,
976 0, &lwb
->lwb_blk
, lwb_abd
, BP_GET_LSIZE(&lwb
->lwb_blk
),
977 zil_lwb_write_done
, lwb
, prio
,
978 ZIO_FLAG_CANFAIL
| ZIO_FLAG_DONT_PROPAGATE
|
979 ZIO_FLAG_FASTWRITE
, &zb
);
981 mutex_exit(&zilog
->zl_lock
);
985 * Define a limited set of intent log block sizes.
987 * These must be a multiple of 4KB. Note only the amount used (again
988 * aligned to 4KB) actually gets written. However, we can't always just
989 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
991 uint64_t zil_block_buckets
[] = {
992 4096, /* non TX_WRITE */
993 8192+4096, /* data base */
994 32*1024 + 4096, /* NFS writes */
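/*
 * (Added note) The selection loop in zil_lwb_write_start() below implies this
 * bucket list ends with a UINT64_MAX sentinel: the smallest bucket that can
 * hold zl_cur_used plus the zil_chain_t header is chosen, falling back to
 * SPA_OLD_MAXBLOCKSIZE when no bucket fits.
 */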
999 * Start a log block write and advance to the next log block.
1000 * Calls are serialized.
1003 zil_lwb_write_start(zilog_t
*zilog
, lwb_t
*lwb
)
1007 spa_t
*spa
= zilog
->zl_spa
;
1011 uint64_t zil_blksz
, wsz
;
1015 if (BP_GET_CHECKSUM(&lwb
->lwb_blk
) == ZIO_CHECKSUM_ZILOG2
) {
1016 zilc
= (zil_chain_t
*)lwb
->lwb_buf
;
1017 bp
= &zilc
->zc_next_blk
;
1019 zilc
= (zil_chain_t
*)(lwb
->lwb_buf
+ lwb
->lwb_sz
);
1020 bp
= &zilc
->zc_next_blk
;
1023 ASSERT(lwb
->lwb_nused
<= lwb
->lwb_sz
);
1026 * Allocate the next block and save its address in this block
1027 * before writing it in order to establish the log chain.
1028 * Note that if the allocation of nlwb synced before we wrote
1029 * the block that points at it (lwb), we'd leak it if we crashed.
1030 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
1031 * We dirty the dataset to ensure that zil_sync() will be called
1032 * to clean up in the event of allocation failure or I/O failure.
1034 tx
= dmu_tx_create(zilog
->zl_os
);
1037 * Since we are not going to create any new dirty data and we can even
1038 * help with clearing the existing dirty data, we should not be subject
1039 * to the dirty data based delays.
1040 * We (ab)use TXG_WAITED to bypass the delay mechanism.
1041 * One side effect from using TXG_WAITED is that dmu_tx_assign() can
1042 * fail if the pool is suspended. Those are dramatic circumstances,
1043 * so we return NULL to signal that the normal ZIL processing is not
1044 * possible and txg_wait_synced() should be used to ensure that the data is on disk.
1047 error
= dmu_tx_assign(tx
, TXG_WAITED
);
1049 ASSERT3S(error
, ==, EIO
);
1053 dsl_dataset_dirty(dmu_objset_ds(zilog
->zl_os
), tx
);
1054 txg
= dmu_tx_get_txg(tx
);
1059 * Log blocks are pre-allocated. Here we select the size of the next
1060 * block, based on size used in the last block.
1061 * - first find the smallest bucket that will fit the block from a
1062 * limited set of block sizes. This is because it's faster to write
1063 * blocks allocated from the same metaslab as they are adjacent or
1065 * - next find the maximum from the new suggested size and an array of
1066 * previous sizes. This lessens a picket fence effect of wrongly
1067 * guessing the size if we have a stream of say 2k, 64k, 2k, 64k writes.
1070 * Note we only write what is used, but we can't just allocate
1071 * the maximum block size because we can exhaust the available block space.
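/*
 * (Added illustration) With the history kept in zl_prev_blks, a workload that
 * alternates small and large commits (e.g. 2k, 64k, 2k, 64k) keeps selecting
 * the larger bucket instead of oscillating between sizes on every block.
 */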
1074 zil_blksz
= zilog
->zl_cur_used
+ sizeof (zil_chain_t
);
1075 for (i
= 0; zil_blksz
> zil_block_buckets
[i
]; i
++)
1077 zil_blksz
= zil_block_buckets
[i
];
1078 if (zil_blksz
== UINT64_MAX
)
1079 zil_blksz
= SPA_OLD_MAXBLOCKSIZE
;
1080 zilog
->zl_prev_blks
[zilog
->zl_prev_rotor
] = zil_blksz
;
1081 for (i
= 0; i
< ZIL_PREV_BLKS
; i
++)
1082 zil_blksz
= MAX(zil_blksz
, zilog
->zl_prev_blks
[i
]);
1083 zilog
->zl_prev_rotor
= (zilog
->zl_prev_rotor
+ 1) & (ZIL_PREV_BLKS
- 1);
1086 error
= zio_alloc_zil(spa
, zilog
->zl_os
, txg
, bp
, zil_blksz
, &slog
);
1088 ZIL_STAT_BUMP(zil_itx_metaslab_slog_count
);
1089 ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes
, lwb
->lwb_nused
);
1091 ZIL_STAT_BUMP(zil_itx_metaslab_normal_count
);
1092 ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes
, lwb
->lwb_nused
);
1095 ASSERT3U(bp
->blk_birth
, ==, txg
);
1096 bp
->blk_cksum
= lwb
->lwb_blk
.blk_cksum
;
1097 bp
->blk_cksum
.zc_word
[ZIL_ZC_SEQ
]++;
1100 * Allocate a new log write buffer (lwb).
1102 nlwb
= zil_alloc_lwb(zilog
, bp
, slog
, txg
, TRUE
);
1104 /* Record the block for later vdev flushing */
1105 zil_add_block(zilog
, &lwb
->lwb_blk
);
1108 if (BP_GET_CHECKSUM(&lwb
->lwb_blk
) == ZIO_CHECKSUM_ZILOG2
) {
1109 /* For Slim ZIL only write what is used. */
1110 wsz
= P2ROUNDUP_TYPED(lwb
->lwb_nused
, ZIL_MIN_BLKSZ
, uint64_t);
1111 ASSERT3U(wsz
, <=, lwb
->lwb_sz
);
1112 zio_shrink(lwb
->lwb_zio
, wsz
);
1119 zilc
->zc_nused
= lwb
->lwb_nused
;
1120 zilc
->zc_eck
.zec_cksum
= lwb
->lwb_blk
.blk_cksum
;
1123 * clear unused data for security
1125 bzero(lwb
->lwb_buf
+ lwb
->lwb_nused
, wsz
- lwb
->lwb_nused
);
1127 zio_nowait(lwb
->lwb_zio
); /* Kick off the write for the old log block */
1130 * If there was an allocation failure then nlwb will be null which
1131 * forces a txg_wait_synced().
1137 zil_lwb_commit(zilog_t
*zilog
, itx_t
*itx
, lwb_t
*lwb
)
1140 lr_write_t
*lrwb
, *lrw
;
1142 uint64_t dlen
, dnow
, lwb_sp
, reclen
, txg
;
1147 ASSERT(lwb
->lwb_buf
!= NULL
);
1149 lrc
= &itx
->itx_lr
; /* Common log record inside itx. */
1150 lrw
= (lr_write_t
*)lrc
; /* Write log record inside itx. */
1151 if (lrc
->lrc_txtype
== TX_WRITE
&& itx
->itx_wr_state
== WR_NEED_COPY
) {
1152 dlen
= P2ROUNDUP_TYPED(
1153 lrw
->lr_length
, sizeof (uint64_t), uint64_t);
1157 reclen
= lrc
->lrc_reclen
;
1158 zilog
->zl_cur_used
+= (reclen
+ dlen
);
1161 zil_lwb_write_init(zilog
, lwb
);
1165 * If this record won't fit in the current log block, start a new one.
1166 * For WR_NEED_COPY optimize layout for minimal number of chunks.
1168 lwb_sp
= lwb
->lwb_sz
- lwb
->lwb_nused
;
1169 if (reclen
> lwb_sp
|| (reclen
+ dlen
> lwb_sp
&&
1170 lwb_sp
< ZIL_MAX_WASTE_SPACE
&& (dlen
% ZIL_MAX_LOG_DATA
== 0 ||
1171 lwb_sp
< reclen
+ dlen
% ZIL_MAX_LOG_DATA
))) {
1172 lwb
= zil_lwb_write_start(zilog
, lwb
);
1175 zil_lwb_write_init(zilog
, lwb
);
1176 ASSERT(LWB_EMPTY(lwb
));
1177 lwb_sp
= lwb
->lwb_sz
- lwb
->lwb_nused
;
1178 ASSERT3U(reclen
+ MIN(dlen
, sizeof (uint64_t)), <=, lwb_sp
);
1181 dnow
= MIN(dlen
, lwb_sp
- reclen
);
1182 lr_buf
= lwb
->lwb_buf
+ lwb
->lwb_nused
;
1183 bcopy(lrc
, lr_buf
, reclen
);
1184 lrcb
= (lr_t
*)lr_buf
; /* Like lrc, but inside lwb. */
1185 lrwb
= (lr_write_t
*)lrcb
; /* Like lrw, but inside lwb. */
1187 ZIL_STAT_BUMP(zil_itx_count
);
1190 * If it's a write, fetch the data or get its blkptr as appropriate.
1192 if (lrc
->lrc_txtype
== TX_WRITE
) {
1193 if (txg
> spa_freeze_txg(zilog
->zl_spa
))
1194 txg_wait_synced(zilog
->zl_dmu_pool
, txg
);
1195 if (itx
->itx_wr_state
== WR_COPIED
) {
1196 ZIL_STAT_BUMP(zil_itx_copied_count
);
1197 ZIL_STAT_INCR(zil_itx_copied_bytes
, lrw
->lr_length
);
1202 if (itx
->itx_wr_state
== WR_NEED_COPY
) {
1203 dbuf
= lr_buf
+ reclen
;
1204 lrcb
->lrc_reclen
+= dnow
;
1205 if (lrwb
->lr_length
> dnow
)
1206 lrwb
->lr_length
= dnow
;
1207 lrw
->lr_offset
+= dnow
;
1208 lrw
->lr_length
-= dnow
;
1209 ZIL_STAT_BUMP(zil_itx_needcopy_count
);
1210 ZIL_STAT_INCR(zil_itx_needcopy_bytes
,
1213 ASSERT(itx
->itx_wr_state
== WR_INDIRECT
);
1215 ZIL_STAT_BUMP(zil_itx_indirect_count
);
1216 ZIL_STAT_INCR(zil_itx_indirect_bytes
,
1219 error
= zilog
->zl_get_data(
1220 itx
->itx_private
, lrwb
, dbuf
, lwb
->lwb_zio
);
1222 txg_wait_synced(zilog
->zl_dmu_pool
, txg
);
1226 ASSERT(error
== ENOENT
|| error
== EEXIST
||
1234 * We're actually making an entry, so update lrc_seq to be the
1235 * log record sequence number. Note that this is generally not
1236 * equal to the itx sequence number because not all transactions
1237 * are synchronous, and sometimes spa_sync() gets there first.
1239 lrcb
->lrc_seq
= ++zilog
->zl_lr_seq
; /* we are single threaded */
1240 lwb
->lwb_nused
+= reclen
+ dnow
;
1241 lwb
->lwb_max_txg
= MAX(lwb
->lwb_max_txg
, txg
);
1242 ASSERT3U(lwb
->lwb_nused
, <=, lwb
->lwb_sz
);
1243 ASSERT0(P2PHASE(lwb
->lwb_nused
, sizeof (uint64_t)));
1247 zilog
->zl_cur_used
+= reclen
;
1255 zil_itx_create(uint64_t txtype
, size_t lrsize
)
1259 lrsize
= P2ROUNDUP_TYPED(lrsize
, sizeof (uint64_t), size_t);
1261 itx
= zio_data_buf_alloc(offsetof(itx_t
, itx_lr
) + lrsize
);
1262 itx
->itx_lr
.lrc_txtype
= txtype
;
1263 itx
->itx_lr
.lrc_reclen
= lrsize
;
1264 itx
->itx_lr
.lrc_seq
= 0; /* defensive */
1265 itx
->itx_sync
= B_TRUE
; /* default is synchronous */
1266 itx
->itx_callback
= NULL
;
1267 itx
->itx_callback_data
= NULL
;
1273 zil_itx_destroy(itx_t
*itx
)
1275 zio_data_buf_free(itx
, offsetof(itx_t
, itx_lr
)+itx
->itx_lr
.lrc_reclen
);
1279 * Free up the sync and async itxs. The itxs_t has already been detached
1280 * so no locks are needed.
1283 zil_itxg_clean(itxs_t
*itxs
)
1289 itx_async_node_t
*ian
;
1291 list
= &itxs
->i_sync_list
;
1292 while ((itx
= list_head(list
)) != NULL
) {
1293 if (itx
->itx_callback
!= NULL
)
1294 itx
->itx_callback(itx
->itx_callback_data
);
1295 list_remove(list
, itx
);
1296 zil_itx_destroy(itx
);
1300 t
= &itxs
->i_async_tree
;
1301 while ((ian
= avl_destroy_nodes(t
, &cookie
)) != NULL
) {
1302 list
= &ian
->ia_list
;
1303 while ((itx
= list_head(list
)) != NULL
) {
1304 if (itx
->itx_callback
!= NULL
)
1305 itx
->itx_callback(itx
->itx_callback_data
);
1306 list_remove(list
, itx
);
1307 zil_itx_destroy(itx
);
1310 kmem_free(ian
, sizeof (itx_async_node_t
));
1314 kmem_free(itxs
, sizeof (itxs_t
));
1318 zil_aitx_compare(const void *x1
, const void *x2
)
1320 const uint64_t o1
= ((itx_async_node_t
*)x1
)->ia_foid
;
1321 const uint64_t o2
= ((itx_async_node_t
*)x2
)->ia_foid
;
1323 return (AVL_CMP(o1
, o2
));
1327 * Remove all async itx with the given oid.
1330 zil_remove_async(zilog_t
*zilog
, uint64_t oid
)
1333 itx_async_node_t
*ian
;
1340 list_create(&clean_list
, sizeof (itx_t
), offsetof(itx_t
, itx_node
));
1342 if (spa_freeze_txg(zilog
->zl_spa
) != UINT64_MAX
) /* ziltest support */
1345 otxg
= spa_last_synced_txg(zilog
->zl_spa
) + 1;
1347 for (txg
= otxg
; txg
< (otxg
+ TXG_CONCURRENT_STATES
); txg
++) {
1348 itxg_t
*itxg
= &zilog
->zl_itxg
[txg
& TXG_MASK
];
1350 mutex_enter(&itxg
->itxg_lock
);
1351 if (itxg
->itxg_txg
!= txg
) {
1352 mutex_exit(&itxg
->itxg_lock
);
1357 * Locate the object node and append its list.
1359 t
= &itxg
->itxg_itxs
->i_async_tree
;
1360 ian
= avl_find(t
, &oid
, &where
);
1362 list_move_tail(&clean_list
, &ian
->ia_list
);
1363 mutex_exit(&itxg
->itxg_lock
);
1365 while ((itx
= list_head(&clean_list
)) != NULL
) {
1366 if (itx
->itx_callback
!= NULL
)
1367 itx
->itx_callback(itx
->itx_callback_data
);
1368 list_remove(&clean_list
, itx
);
1369 zil_itx_destroy(itx
);
1371 list_destroy(&clean_list
);
1375 zil_itx_assign(zilog_t
*zilog
, itx_t
*itx
, dmu_tx_t
*tx
)
1379 itxs_t
*itxs
, *clean
= NULL
;
1382 * Object ids can be re-instantiated in the next txg so
1383 * remove any async transactions to avoid future leaks.
1384 * This can happen if a fsync occurs on the re-instantiated
1385 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1386 * the new file data and flushes a write record for the old object.
1388 if ((itx
->itx_lr
.lrc_txtype
& ~TX_CI
) == TX_REMOVE
)
1389 zil_remove_async(zilog
, itx
->itx_oid
);
1392 * Ensure the data of a renamed file is committed before the rename.
1394 if ((itx
->itx_lr
.lrc_txtype
& ~TX_CI
) == TX_RENAME
)
1395 zil_async_to_sync(zilog
, itx
->itx_oid
);
1397 if (spa_freeze_txg(zilog
->zl_spa
) != UINT64_MAX
)
1400 txg
= dmu_tx_get_txg(tx
);
1402 itxg
= &zilog
->zl_itxg
[txg
& TXG_MASK
];
1403 mutex_enter(&itxg
->itxg_lock
);
1404 itxs
= itxg
->itxg_itxs
;
1405 if (itxg
->itxg_txg
!= txg
) {
1408 * The zil_clean callback hasn't got around to cleaning
1409 * this itxg. Save the itxs for release below.
1410 * This should be rare.
1412 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
1413 "txg %llu", itxg
->itxg_txg
);
1414 clean
= itxg
->itxg_itxs
;
1416 itxg
->itxg_txg
= txg
;
1417 itxs
= itxg
->itxg_itxs
= kmem_zalloc(sizeof (itxs_t
),
1420 list_create(&itxs
->i_sync_list
, sizeof (itx_t
),
1421 offsetof(itx_t
, itx_node
));
1422 avl_create(&itxs
->i_async_tree
, zil_aitx_compare
,
1423 sizeof (itx_async_node_t
),
1424 offsetof(itx_async_node_t
, ia_node
));
1426 if (itx
->itx_sync
) {
1427 list_insert_tail(&itxs
->i_sync_list
, itx
);
1429 avl_tree_t
*t
= &itxs
->i_async_tree
;
1431 LR_FOID_GET_OBJ(((lr_ooo_t
*)&itx
->itx_lr
)->lr_foid
);
1432 itx_async_node_t
*ian
;
1435 ian
= avl_find(t
, &foid
, &where
);
1437 ian
= kmem_alloc(sizeof (itx_async_node_t
),
1439 list_create(&ian
->ia_list
, sizeof (itx_t
),
1440 offsetof(itx_t
, itx_node
));
1441 ian
->ia_foid
= foid
;
1442 avl_insert(t
, ian
, where
);
1444 list_insert_tail(&ian
->ia_list
, itx
);
1447 itx
->itx_lr
.lrc_txg
= dmu_tx_get_txg(tx
);
1448 zilog_dirty(zilog
, txg
);
1449 mutex_exit(&itxg
->itxg_lock
);
1451 /* Release the old itxs now we've dropped the lock */
1453 zil_itxg_clean(clean
);
1457 * If there are any in-memory intent log transactions which have now been
1458 * synced then start up a taskq to free them. We should only do this after we
1459 * have written out the uberblocks (i.e. the txg has been committed) so that we
1460 * don't inadvertently clean out in-memory log records that would be required by zil_commit().
1464 zil_clean(zilog_t
*zilog
, uint64_t synced_txg
)
1466 itxg_t
*itxg
= &zilog
->zl_itxg
[synced_txg
& TXG_MASK
];
1469 mutex_enter(&itxg
->itxg_lock
);
1470 if (itxg
->itxg_itxs
== NULL
|| itxg
->itxg_txg
== ZILTEST_TXG
) {
1471 mutex_exit(&itxg
->itxg_lock
);
1474 ASSERT3U(itxg
->itxg_txg
, <=, synced_txg
);
1475 ASSERT3U(itxg
->itxg_txg
, !=, 0);
1476 clean_me
= itxg
->itxg_itxs
;
1477 itxg
->itxg_itxs
= NULL
;
1479 mutex_exit(&itxg
->itxg_lock
);
1481 * Preferably start a task queue to free up the old itxs but
1482 * if taskq_dispatch can't allocate resources to do that then
1483 * free it in-line. This should be rare. Note, using TQ_SLEEP
1484 * created a bad performance problem.
1486 ASSERT3P(zilog
->zl_dmu_pool
, !=, NULL
);
1487 ASSERT3P(zilog
->zl_dmu_pool
->dp_zil_clean_taskq
, !=, NULL
);
1488 taskqid_t id
= taskq_dispatch(zilog
->zl_dmu_pool
->dp_zil_clean_taskq
,
1489 (void (*)(void *))zil_itxg_clean
, clean_me
, TQ_NOSLEEP
);
1490 if (id
== TASKQID_INVALID
)
1491 zil_itxg_clean(clean_me
);
1495 * Get the list of itxs to commit into zl_itx_commit_list.
1498 zil_get_commit_list(zilog_t
*zilog
)
1501 list_t
*commit_list
= &zilog
->zl_itx_commit_list
;
1503 if (spa_freeze_txg(zilog
->zl_spa
) != UINT64_MAX
) /* ziltest support */
1506 otxg
= spa_last_synced_txg(zilog
->zl_spa
) + 1;
1509 * This is inherently racy, since there is nothing to prevent
1510 * the last synced txg from changing. That's okay since we'll
1511 * only commit things in the future.
1513 for (txg
= otxg
; txg
< (otxg
+ TXG_CONCURRENT_STATES
); txg
++) {
1514 itxg_t
*itxg
= &zilog
->zl_itxg
[txg
& TXG_MASK
];
1516 mutex_enter(&itxg
->itxg_lock
);
1517 if (itxg
->itxg_txg
!= txg
) {
1518 mutex_exit(&itxg
->itxg_lock
);
1523 * If we're adding itx records to the zl_itx_commit_list,
1524 * then the zil better be dirty in this "txg". We can assert
1525 * that here since we're holding the itxg_lock which will
1526 * prevent spa_sync from cleaning it. Once we add the itxs
1527 * to the zl_itx_commit_list we must commit it to disk even
1528 * if it's unnecessary (i.e. the txg was synced).
1530 ASSERT(zilog_is_dirty_in_txg(zilog
, txg
) ||
1531 spa_freeze_txg(zilog
->zl_spa
) != UINT64_MAX
);
1532 list_move_tail(commit_list
, &itxg
->itxg_itxs
->i_sync_list
);
1534 mutex_exit(&itxg
->itxg_lock
);
1539 * Move the async itxs for a specified object to commit into sync lists.
1542 zil_async_to_sync(zilog_t
*zilog
, uint64_t foid
)
1545 itx_async_node_t
*ian
;
1549 if (spa_freeze_txg(zilog
->zl_spa
) != UINT64_MAX
) /* ziltest support */
1552 otxg
= spa_last_synced_txg(zilog
->zl_spa
) + 1;
1555 * This is inherently racy, since there is nothing to prevent
1556 * the last synced txg from changing.
1558 for (txg
= otxg
; txg
< (otxg
+ TXG_CONCURRENT_STATES
); txg
++) {
1559 itxg_t
*itxg
= &zilog
->zl_itxg
[txg
& TXG_MASK
];
1561 mutex_enter(&itxg
->itxg_lock
);
1562 if (itxg
->itxg_txg
!= txg
) {
1563 mutex_exit(&itxg
->itxg_lock
);
1568 * If a foid is specified then find that node and append its
1569 * list. Otherwise walk the tree appending all the lists
1570 * to the sync list. We add to the end rather than the
1571 * beginning to ensure the create has happened.
1573 t
= &itxg
->itxg_itxs
->i_async_tree
;
1575 ian
= avl_find(t
, &foid
, &where
);
1577 list_move_tail(&itxg
->itxg_itxs
->i_sync_list
,
1581 void *cookie
= NULL
;
1583 while ((ian
= avl_destroy_nodes(t
, &cookie
)) != NULL
) {
1584 list_move_tail(&itxg
->itxg_itxs
->i_sync_list
,
1586 list_destroy(&ian
->ia_list
);
1587 kmem_free(ian
, sizeof (itx_async_node_t
));
1590 mutex_exit(&itxg
->itxg_lock
);
1595 zil_commit_writer(zilog_t
*zilog
)
1600 spa_t
*spa
= zilog
->zl_spa
;
1603 ASSERT(zilog
->zl_root_zio
== NULL
);
1605 mutex_exit(&zilog
->zl_lock
);
1607 zil_get_commit_list(zilog
);
1610 * Return if there's nothing to commit before we dirty the fs by
1611 * calling zil_create().
1613 if (list_head(&zilog
->zl_itx_commit_list
) == NULL
) {
1614 mutex_enter(&zilog
->zl_lock
);
1618 if (zilog
->zl_suspend
) {
1621 lwb
= list_tail(&zilog
->zl_lwb_list
);
1623 lwb
= zil_create(zilog
);
1626 DTRACE_PROBE1(zil__cw1
, zilog_t
*, zilog
);
1627 for (itx
= list_head(&zilog
->zl_itx_commit_list
); itx
!= NULL
;
1628 itx
= list_next(&zilog
->zl_itx_commit_list
, itx
)) {
1629 txg
= itx
->itx_lr
.lrc_txg
;
1630 ASSERT3U(txg
, !=, 0);
1633 * This is inherently racy and may result in us writing
1634 * out a log block for a txg that was just synced. This is
1635 * ok since we'll end up cleaning up that log block the next
1636 * time we call zil_sync().
1638 if (txg
> spa_last_synced_txg(spa
) || txg
> spa_freeze_txg(spa
))
1639 lwb
= zil_lwb_commit(zilog
, itx
, lwb
);
1641 DTRACE_PROBE1(zil__cw2
, zilog_t
*, zilog
);
1643 /* write the last block out */
1644 if (lwb
!= NULL
&& lwb
->lwb_zio
!= NULL
)
1645 lwb
= zil_lwb_write_start(zilog
, lwb
);
1647 zilog
->zl_cur_used
= 0;
1650 * Wait if necessary for the log blocks to be on stable storage.
1652 if (zilog
->zl_root_zio
) {
1653 error
= zio_wait(zilog
->zl_root_zio
);
1654 zilog
->zl_root_zio
= NULL
;
1655 zil_flush_vdevs(zilog
);
1658 if (error
|| lwb
== NULL
)
1659 txg_wait_synced(zilog
->zl_dmu_pool
, 0);
1661 while ((itx
= list_head(&zilog
->zl_itx_commit_list
))) {
1662 txg
= itx
->itx_lr
.lrc_txg
;
1665 if (itx
->itx_callback
!= NULL
)
1666 itx
->itx_callback(itx
->itx_callback_data
);
1667 list_remove(&zilog
->zl_itx_commit_list
, itx
);
1668 zil_itx_destroy(itx
);
1671 mutex_enter(&zilog
->zl_lock
);
1674 * Remember the highest committed log sequence number for ztest.
1675 * We only update this value when all the log writes succeeded,
1676 * because ztest wants to ASSERT that it got the whole log chain.
1678 if (error
== 0 && lwb
!= NULL
)
1679 zilog
->zl_commit_lr_seq
= zilog
->zl_lr_seq
;
1683 * Commit zfs transactions to stable storage.
1684 * If foid is 0 push out all transactions, otherwise push only those
1685 * for that object or might reference that object.
1687 * itxs are committed in batches. In a heavily stressed zil there will be
1688 * a commit writer thread that is writing out a bunch of itxs to the log
1689 * for a set of committing threads (cthreads) in the same batch as the writer.
1690 * Those cthreads are all waiting on the same cv for that batch.
1692 * There will also be a different and growing batch of threads that are
1693 * waiting to commit (qthreads). When the committing batch completes
1694 * a transition occurs such that the cthreads exit and the qthreads become
1695 * cthreads. One of the new cthreads becomes the writer thread for the
1696 * batch. Any new threads arriving become new qthreads.
1698 * Only 2 condition variables are needed and there's no transition
1699 * between the two cvs needed. They just flip-flop between qthreads and cthreads.
1702 * Using this scheme we can efficiently wake up only those threads
1703 * that have been committed.
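/*
 * (Added illustration) Roughly: waiters for batch N sleep on
 * zl_cv_batch[N & 1] while batch N's writer holds zl_writer; new arrivals
 * become batch N+1 and sleep on zl_cv_batch[(N + 1) & 1]. When batch N
 * completes, cv_signal() on the other cv picks one thread to become the next
 * writer and cv_broadcast() on this cv releases batch N's waiters.
 */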
1706 zil_commit(zilog_t
*zilog
, uint64_t foid
)
1710 if (zilog
->zl_sync
== ZFS_SYNC_DISABLED
)
1713 ZIL_STAT_BUMP(zil_commit_count
);
1715 /* move the async itxs for the foid to the sync queues */
1716 zil_async_to_sync(zilog
, foid
);
1718 mutex_enter(&zilog
->zl_lock
);
1719 mybatch
= zilog
->zl_next_batch
;
1720 while (zilog
->zl_writer
) {
1721 cv_wait(&zilog
->zl_cv_batch
[mybatch
& 1], &zilog
->zl_lock
);
1722 if (mybatch
<= zilog
->zl_com_batch
) {
1723 mutex_exit(&zilog
->zl_lock
);
1728 zilog
->zl_next_batch
++;
1729 zilog
->zl_writer
= B_TRUE
;
1730 ZIL_STAT_BUMP(zil_commit_writer_count
);
1731 zil_commit_writer(zilog
);
1732 zilog
->zl_com_batch
= mybatch
;
1733 zilog
->zl_writer
= B_FALSE
;
1735 /* wake up one thread to become the next writer */
1736 cv_signal(&zilog
->zl_cv_batch
[(mybatch
+1) & 1]);
1738 /* wake up all threads waiting for this batch to be committed */
1739 cv_broadcast(&zilog
->zl_cv_batch
[mybatch
& 1]);
1741 mutex_exit(&zilog
->zl_lock
);
1745 * Called in syncing context to free committed log blocks and update log header.
1748 zil_sync(zilog_t
*zilog
, dmu_tx_t
*tx
)
1750 zil_header_t
*zh
= zil_header_in_syncing_context(zilog
);
1751 uint64_t txg
= dmu_tx_get_txg(tx
);
1752 spa_t
*spa
= zilog
->zl_spa
;
1753 uint64_t *replayed_seq
= &zilog
->zl_replayed_seq
[txg
& TXG_MASK
];
1757 * We don't zero out zl_destroy_txg, so make sure we don't try
1758 * to destroy it twice.
1760 if (spa_sync_pass(spa
) != 1)
1763 mutex_enter(&zilog
->zl_lock
);
1765 ASSERT(zilog
->zl_stop_sync
== 0);
1767 if (*replayed_seq
!= 0) {
1768 ASSERT(zh
->zh_replay_seq
< *replayed_seq
);
1769 zh
->zh_replay_seq
= *replayed_seq
;
1773 if (zilog
->zl_destroy_txg
== txg
) {
1774 blkptr_t blk
= zh
->zh_log
;
1776 ASSERT(list_head(&zilog
->zl_lwb_list
) == NULL
);
1778 bzero(zh
, sizeof (zil_header_t
));
1779 bzero(zilog
->zl_replayed_seq
, sizeof (zilog
->zl_replayed_seq
));
1781 if (zilog
->zl_keep_first
) {
1783 * If this block was part of log chain that couldn't
1784 * be claimed because a device was missing during
1785 * zil_claim(), but that device later returns,
1786 * then this block could erroneously appear valid.
1787 * To guard against this, assign a new GUID to the new
1788 * log chain so it doesn't matter what blk points to.
1790 zil_init_log_chain(zilog
, &blk
);
1795 while ((lwb
= list_head(&zilog
->zl_lwb_list
)) != NULL
) {
1796 zh
->zh_log
= lwb
->lwb_blk
;
1797 if (lwb
->lwb_buf
!= NULL
|| lwb
->lwb_max_txg
> txg
)
1800 ASSERT(lwb
->lwb_zio
== NULL
);
1802 list_remove(&zilog
->zl_lwb_list
, lwb
);
1803 zio_free_zil(spa
, txg
, &lwb
->lwb_blk
);
1804 kmem_cache_free(zil_lwb_cache
, lwb
);
1807 * If we don't have anything left in the lwb list then
1808 * we've had an allocation failure and we need to zero
1809 * out the zil_header blkptr so that we don't end
1810 * up freeing the same block twice.
1812 if (list_head(&zilog
->zl_lwb_list
) == NULL
)
1813 BP_ZERO(&zh
->zh_log
);
1817 * Remove fastwrite on any blocks that have been pre-allocated for
1818 * the next commit. This prevents fastwrite counter pollution by
1819 * unused, long-lived LWBs.
1821 for (; lwb
!= NULL
; lwb
= list_next(&zilog
->zl_lwb_list
, lwb
)) {
1822 if (lwb
->lwb_fastwrite
&& !lwb
->lwb_zio
) {
1823 metaslab_fastwrite_unmark(zilog
->zl_spa
, &lwb
->lwb_blk
);
1824 lwb
->lwb_fastwrite
= 0;
1828 mutex_exit(&zilog
->zl_lock
);
1834 zil_lwb_cache
= kmem_cache_create("zil_lwb_cache",
1835 sizeof (struct lwb
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
1837 zil_ksp
= kstat_create("zfs", 0, "zil", "misc",
1838 KSTAT_TYPE_NAMED
, sizeof (zil_stats
) / sizeof (kstat_named_t
),
1839 KSTAT_FLAG_VIRTUAL
);
1841 if (zil_ksp
!= NULL
) {
1842 zil_ksp
->ks_data
= &zil_stats
;
1843 kstat_install(zil_ksp
);
1850 kmem_cache_destroy(zil_lwb_cache
);
1852 if (zil_ksp
!= NULL
) {
1853 kstat_delete(zil_ksp
);
1859 zil_set_sync(zilog_t
*zilog
, uint64_t sync
)
1861 zilog
->zl_sync
= sync
;
1865 zil_set_logbias(zilog_t
*zilog
, uint64_t logbias
)
1867 zilog
->zl_logbias
= logbias
;
1871 zil_alloc(objset_t
*os
, zil_header_t
*zh_phys
)
1875 zilog
= kmem_zalloc(sizeof (zilog_t
), KM_SLEEP
);
1877 zilog
->zl_header
= zh_phys
;
1879 zilog
->zl_spa
= dmu_objset_spa(os
);
1880 zilog
->zl_dmu_pool
= dmu_objset_pool(os
);
1881 zilog
->zl_destroy_txg
= TXG_INITIAL
- 1;
1882 zilog
->zl_logbias
= dmu_objset_logbias(os
);
1883 zilog
->zl_sync
= dmu_objset_syncprop(os
);
1884 zilog
->zl_next_batch
= 1;
1886 mutex_init(&zilog
->zl_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1888 for (int i
= 0; i
< TXG_SIZE
; i
++) {
1889 mutex_init(&zilog
->zl_itxg
[i
].itxg_lock
, NULL
,
1890 MUTEX_DEFAULT
, NULL
);
1893 list_create(&zilog
->zl_lwb_list
, sizeof (lwb_t
),
1894 offsetof(lwb_t
, lwb_node
));
1896 list_create(&zilog
->zl_itx_commit_list
, sizeof (itx_t
),
1897 offsetof(itx_t
, itx_node
));
1899 mutex_init(&zilog
->zl_vdev_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1901 avl_create(&zilog
->zl_vdev_tree
, zil_vdev_compare
,
1902 sizeof (zil_vdev_node_t
), offsetof(zil_vdev_node_t
, zv_node
));
1904 cv_init(&zilog
->zl_cv_writer
, NULL
, CV_DEFAULT
, NULL
);
1905 cv_init(&zilog
->zl_cv_suspend
, NULL
, CV_DEFAULT
, NULL
);
1906 cv_init(&zilog
->zl_cv_batch
[0], NULL
, CV_DEFAULT
, NULL
);
1907 cv_init(&zilog
->zl_cv_batch
[1], NULL
, CV_DEFAULT
, NULL
);
1913 zil_free(zilog_t
*zilog
)
1917 zilog
->zl_stop_sync
= 1;
1919 ASSERT0(zilog
->zl_suspend
);
1920 ASSERT0(zilog
->zl_suspending
);
1922 ASSERT(list_is_empty(&zilog
->zl_lwb_list
));
1923 list_destroy(&zilog
->zl_lwb_list
);
1925 avl_destroy(&zilog
->zl_vdev_tree
);
1926 mutex_destroy(&zilog
->zl_vdev_lock
);
1928 ASSERT(list_is_empty(&zilog
->zl_itx_commit_list
));
1929 list_destroy(&zilog
->zl_itx_commit_list
);
1931 for (i
= 0; i
< TXG_SIZE
; i
++) {
1933 * It's possible for an itx to be generated that doesn't dirty
1934 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1935 * callback to remove the entry. We remove those here.
1937 * Also free up the ziltest itxs.
1939 if (zilog
->zl_itxg
[i
].itxg_itxs
)
1940 zil_itxg_clean(zilog
->zl_itxg
[i
].itxg_itxs
);
1941 mutex_destroy(&zilog
->zl_itxg
[i
].itxg_lock
);
1944 mutex_destroy(&zilog
->zl_lock
);
1946 cv_destroy(&zilog
->zl_cv_writer
);
1947 cv_destroy(&zilog
->zl_cv_suspend
);
1948 cv_destroy(&zilog
->zl_cv_batch
[0]);
1949 cv_destroy(&zilog
->zl_cv_batch
[1]);
1951 kmem_free(zilog
, sizeof (zilog_t
));
1955 * Open an intent log.
1958 zil_open(objset_t
*os
, zil_get_data_t
*get_data
)
1960 zilog_t
*zilog
= dmu_objset_zil(os
);
1962 ASSERT(zilog
->zl_get_data
== NULL
);
1963 ASSERT(list_is_empty(&zilog
->zl_lwb_list
));
1965 zilog
->zl_get_data
= get_data
;
1971 * Close an intent log.
1974 zil_close(zilog_t
*zilog
)
1979 zil_commit(zilog
, 0); /* commit all itx */
1982 * The lwb_max_txg for the stubby lwb will reflect the last activity
1983 * for the zil. After a txg_wait_synced() on the txg we know all the
1984 * callbacks have occurred that may clean the zil. Only then can we
1985 * destroy the zl_clean_taskq.
1987 mutex_enter(&zilog
->zl_lock
);
1988 lwb
= list_tail(&zilog
->zl_lwb_list
);
1990 txg
= lwb
->lwb_max_txg
;
1991 mutex_exit(&zilog
->zl_lock
);
1993 txg_wait_synced(zilog
->zl_dmu_pool
, txg
);
1995 if (zilog_is_dirty(zilog
))
1996 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog
, txg
);
1997 if (txg
< spa_freeze_txg(zilog
->zl_spa
))
1998 VERIFY(!zilog_is_dirty(zilog
));
2000 zilog
->zl_get_data
= NULL
;
2003 * We should have only one LWB left on the list; remove it now.
2005 mutex_enter(&zilog
->zl_lock
);
2006 lwb
= list_head(&zilog
->zl_lwb_list
);
2008 ASSERT(lwb
== list_tail(&zilog
->zl_lwb_list
));
2009 ASSERT(lwb
->lwb_zio
== NULL
);
2010 if (lwb
->lwb_fastwrite
)
2011 metaslab_fastwrite_unmark(zilog
->zl_spa
, &lwb
->lwb_blk
);
2012 list_remove(&zilog
->zl_lwb_list
, lwb
);
2013 zio_buf_free(lwb
->lwb_buf
, lwb
->lwb_sz
);
2014 kmem_cache_free(zil_lwb_cache
, lwb
);
2016 mutex_exit(&zilog
->zl_lock
);
2019 static char *suspend_tag
= "zil suspending";
2022 * Suspend an intent log. While in suspended mode, we still honor
2023 * synchronous semantics, but we rely on txg_wait_synced() to do it.
2024 * On old version pools, we suspend the log briefly when taking a
2025 * snapshot so that it will have an empty intent log.
2027 * Long holds are not really intended to be used the way we do here --
2028 * held for such a short time. A concurrent caller of dsl_dataset_long_held()
2029 * could fail. Therefore we take pains to only put a long hold if it is
2030 * actually necessary. Fortunately, it will only be necessary if the
2031 * objset is currently mounted (or the ZVOL equivalent). In that case it
2032 * will already have a long hold, so we are not really making things any worse.
2034 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
2035 * zvol_state_t), and use their mechanism to prevent their hold from being
2036 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for very little gain.
2039 * if cookiep == NULL, this does both the suspend & resume.
2040 * Otherwise, it returns with the dataset "long held", and the cookie
2041 * should be passed into zil_resume().
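/*
 * (Added illustration of the two calling patterns described above;
 * hypothetical caller code, not part of this file.)
 *
 *	void *cookie;
 *	if (zil_suspend(osname, &cookie) == 0) {
 *		...	(dataset is "long held" and its ZIL is empty)
 *		zil_resume(cookie);
 *	}
 *
 *	(void) zil_suspend(osname, NULL);	(suspend and resume in one call)
 */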
2044 zil_suspend(const char *osname
, void **cookiep
)
2048 const zil_header_t
*zh
;
2051 error
= dmu_objset_hold(osname
, suspend_tag
, &os
);
2054 zilog
= dmu_objset_zil(os
);
2056 mutex_enter(&zilog
->zl_lock
);
2057 zh
= zilog
->zl_header
;
2059 if (zh
->zh_flags
& ZIL_REPLAY_NEEDED
) { /* unplayed log */
2060 mutex_exit(&zilog
->zl_lock
);
2061 dmu_objset_rele(os
, suspend_tag
);
2062 return (SET_ERROR(EBUSY
));
2066 * Don't put a long hold in the cases where we can avoid it. This
2067 * is when there is no cookie so we are doing a suspend & resume
2068 * (i.e. called from zil_vdev_offline()), and there's nothing to do
2069 * for the suspend because it's already suspended, or there's no ZIL.
2071 if (cookiep
== NULL
&& !zilog
->zl_suspending
&&
2072 (zilog
->zl_suspend
> 0 || BP_IS_HOLE(&zh
->zh_log
))) {
2073 mutex_exit(&zilog
->zl_lock
);
2074 dmu_objset_rele(os
, suspend_tag
);
2078 dsl_dataset_long_hold(dmu_objset_ds(os
), suspend_tag
);
2079 dsl_pool_rele(dmu_objset_pool(os
), suspend_tag
);
2081 zilog
->zl_suspend
++;
2083 if (zilog
->zl_suspend
> 1) {
2085 * Someone else is already suspending it.
2086 * Just wait for them to finish.
2089 while (zilog
->zl_suspending
)
2090 cv_wait(&zilog
->zl_cv_suspend
, &zilog
->zl_lock
);
2091 mutex_exit(&zilog
->zl_lock
);
2093 if (cookiep
== NULL
)
2101 * If there is no pointer to an on-disk block, this ZIL must not
2102 * be active (e.g. filesystem not mounted), so there's nothing
2105 if (BP_IS_HOLE(&zh
->zh_log
)) {
2106 ASSERT(cookiep
!= NULL
); /* fast path already handled */
2109 mutex_exit(&zilog
->zl_lock
);
	/*
	 * The ZIL has work to do. Ensure that the associated encryption
	 * key will remain mapped while we are committing the log by
	 * grabbing a reference to it. If the key isn't loaded we have no
	 * choice but to return an error until the wrapping key is loaded.
	 */
	if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa,
	    dmu_objset_ds(os), FTAG) != 0) {
		zilog->zl_suspend--;
		mutex_exit(&zilog->zl_lock);
		dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
		dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
		return (SET_ERROR(EBUSY));
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, 0);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);
	if (os->os_encrypted) {
		/*
		 * Encrypted datasets need to wait for all data to be
		 * synced out before removing the mapping.
		 *
		 * XXX: Depending on the number of datasets with
		 * outstanding ZIL data on a given log device, this
		 * might cause spa_offline_log() to take a long time.
		 */
		txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
		VERIFY0(spa_keystore_remove_mapping(os->os_spa,
		    dmu_objset_id(os), FTAG));
	}
	if (cookiep == NULL)
		zil_resume(os);
	else
		*cookiep = os;
	return (0);
}
void
zil_resume(void *cookie)
{
	objset_t *os = cookie;
	zilog_t *zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
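
/*
 * Illustrative sketch only (not part of the ZIL itself): a hypothetical
 * caller following the cookie contract documented above zil_suspend().
 * The function name example_with_zil_suspended() is made up for
 * illustration and error handling is minimal.
 */
#if 0
static int
example_with_zil_suspended(const char *osname)
{
	void *cookie;
	int error;

	/*
	 * With a non-NULL cookiep, a successful return means the dataset
	 * is "long held" and its intent log has been committed and emptied.
	 */
	error = zil_suspend(osname, &cookie);
	if (error != 0)
		return (error);

	/* ... operate on the dataset while its ZIL is quiesced ... */

	/* Drop the suspend count and the holds taken by zil_suspend(). */
	zil_resume(cookie);
	return (0);
}
#endif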
typedef struct zil_replay_arg {
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	boolean_t	zr_byteswap;
	char		*zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
	char name[ZFS_MAX_DATASET_NAME_LEN];

	zilog->zl_replaying_seq--;	/* didn't actually replay this one */

	dmu_objset_name(zilog->zl_os, name);

	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
	    (u_longlong_t)lr->lrc_seq,
	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");

	return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int error = 0;

	zilog->zl_replaying_seq = lr->lrc_seq;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return (0);

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return (0);

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE)
		return (zil_replay_error(zilog, lr, EINVAL));
	/*
	 * If this record type can be logged out of order, the object
	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
	 */
	if (TX_OOO(txtype)) {
		error = dmu_object_info(zilog->zl_os,
		    LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
		if (error == ENOENT || error == EEXIST)
			return (0);
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lr, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		error = zil_read_log_data(zilog, (lr_write_t *)lr,
		    zr->zr_lr + reclen);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}
	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different record types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lr, reclen);
	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
	if (error != 0) {
		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error we try syncing out
		 * any removes then retry the transaction.  Note that we
		 * specify B_FALSE for byteswap now, so we don't do it twice.
		 */
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}

	return (0);
}
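
/*
 * Illustrative sketch only: a hypothetical replay vector of the kind
 * invoked through zr_replay[txtype] above.  The real vectors live in
 * zfs_replay.c; the names below are placeholders.  As described in the
 * byteswap comment above, the generic code returns the record to its
 * original byte order, so each vector must byteswap its own record
 * layout when the byteswap flag is set.
 */
#if 0
static int
example_replay_vector(void *arg, char *lrbuf, boolean_t byteswap)
{
	lr_t *lr = (lr_t *)lrbuf;

	/* Byteswap the fixed portion of this record type, if required. */
	if (byteswap)
		byteswap_uint64_array(lr, sizeof (lr_t));

	/* ... apply the logged operation to the objset given by arg ... */

	return (0);
}
#endif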
static int
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;

	return (0);
}
/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg, B_TRUE);
	vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}
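
/*
 * Illustrative sketch only: how an objset consumer might drive replay at
 * mount time.  In ZFS proper the vectors come from zfs_replay.c; the table
 * and function names here are placeholders.
 */
#if 0
extern zil_replay_func_t *example_replay_vector_table[TX_MAX_TYPE];

static void
example_mount_time_replay(objset_t *os, void *fs_private)
{
	/*
	 * zil_replay() replays any needed records through the supplied
	 * vector table and then destroys the on-disk log; if no replay
	 * is needed it simply destroys the log.
	 */
	zil_replay(os, fs_private, example_replay_vector_table);
}
#endif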
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}
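
/*
 * Illustrative sketch only: a hypothetical zfs_log_*()-style routine that
 * consults zil_replaying() before logging, so operations applied during
 * replay (or with sync disabled) are not logged again.  The function name
 * is made up and itx handling is simplified.
 */
#if 0
static void
example_log_operation(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    size_t lrsize)
{
	itx_t *itx;

	/* zil_replaying() also records the replayed sequence number. */
	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, lrsize);
	/* ... fill in the record body following the lr_t header ... */
	zil_itx_assign(zilog, itx, tx);
}
#endif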
int
zil_vdev_offline(const char *osname, void *arg)
{
	int error;

	error = zil_suspend(osname, NULL);
	if (error != 0)
		return (SET_ERROR(EEXIST));

	return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_vdev_offline);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
module_param(zil_replay_disable, int, 0644);
MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");

module_param(zfs_nocacheflush, int, 0644);
MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");

module_param(zil_slog_bulk, ulong, 0644);
MODULE_PARM_DESC(zil_slog_bulk, "Limit in bytes slog sync writes per commit");
#endif