/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of a ZIL header, ZIL blocks, and ZIL records.
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available.
 */
/*
 * This global ZIL switch affects all pools
 */
int zil_replay_disable = 0;    /* disable intent logging replay */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
int zfs_nocacheflush = 0;

static kmem_cache_t *zil_lwb_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))

/*
 * ziltest is by and large an ugly hack, but very useful in
 * checking replay without tedious work.
 * When running ziltest we want to keep all itx's and so maintain
 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
 * We subtract TXG_CONCURRENT_STATES to allow for common code.
 */
#define ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
static int
zil_bp_compare(const void *x1, const void *x2)
{
    const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
    const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

    if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
        return (-1);
    if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
        return (1);

    if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
        return (-1);
    if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
        return (1);

    return (0);
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
    avl_create(&zilog->zl_bp_tree, zil_bp_compare,
        sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
    avl_tree_t *t = &zilog->zl_bp_tree;
    zil_bp_node_t *zn;
    void *cookie = NULL;

    while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
        kmem_free(zn, sizeof (zil_bp_node_t));

    avl_destroy(t);
}
static int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
    avl_tree_t *t = &zilog->zl_bp_tree;
    const dva_t *dva = BP_IDENTITY(bp);
    zil_bp_node_t *zn;
    avl_index_t where;

    if (avl_find(t, dva, &where) != NULL)
        return (EEXIST);

    zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
    zn->zn_dva = *dva;
    avl_insert(t, zn, where);

    return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
    return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
    zio_cksum_t *zc = &bp->blk_cksum;

    zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
    zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
    zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
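
/*
 * The chain identity lives entirely in the checksum words set above: a random
 * GUID pair, the objset id, and a sequence number that starts at 1 and is
 * bumped for each successive block. zil_read_log_block() relies on this when
 * it verifies that the next block's embedded checksum equals this block's
 * checksum with ZIL_ZC_SEQ incremented by one.
 */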
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
    enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
    uint32_t aflags = ARC_WAIT;
    arc_buf_t *abuf = NULL;
    zbookmark_t zb;
    int error;

    if (zilog->zl_header->zh_claim_txg == 0)
        zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

    if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
        zio_flags |= ZIO_FLAG_SPECULATIVE;

    SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
        ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

    error = dsl_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
        ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

    if (error == 0) {
        zio_cksum_t cksum = bp->blk_cksum;

        /*
         * Validate the checksummed log block.
         *
         * Sequence numbers should be... sequential. The checksum
         * verifier for the next block should be bp's checksum plus 1.
         *
         * Also check the log chain linkage and size used.
         */
        cksum.zc_word[ZIL_ZC_SEQ]++;

        if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
            zil_chain_t *zilc = abuf->b_data;
            char *lr = (char *)(zilc + 1);
            uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

            if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
                sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
                error = ECKSUM;
            } else {
                bcopy(lr, dst, len);
                *end = (char *)dst + len;
                *nbp = zilc->zc_next_blk;
            }
        } else {
            char *lr = abuf->b_data;
            uint64_t size = BP_GET_LSIZE(bp);
            zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

            if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
                sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
                (zilc->zc_nused > (size - sizeof (*zilc)))) {
                error = ECKSUM;
            } else {
                bcopy(lr, dst, zilc->zc_nused);
                *end = (char *)dst + zilc->zc_nused;
                *nbp = zilc->zc_next_blk;
            }
        }

        VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
    }

    return (error);
}
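
/*
 * Two on-disk layouts are handled above: ZIO_CHECKSUM_ZILOG2 ("Slim ZIL")
 * blocks carry the zil_chain_t header at the start of the block, followed by
 * the log records, while older blocks place the zil_chain_t trailer at the
 * very end of the block. In both cases zc_next_blk provides the pointer and
 * expected checksum of the next block in the chain.
 */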
/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
    enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
    const blkptr_t *bp = &lr->lr_blkptr;
    uint32_t aflags = ARC_WAIT;
    arc_buf_t *abuf = NULL;
    zbookmark_t zb;
    int error;

    if (BP_IS_HOLE(bp)) {
        if (wbuf != NULL)
            bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
        return (0);
    }

    if (zilog->zl_header->zh_claim_txg == 0)
        zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

    SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
        ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

    error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
        ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

    if (error == 0) {
        if (wbuf != NULL)
            bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
        (void) arc_buf_remove_ref(abuf, &abuf);
    }

    return (error);
}
/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
    const zil_header_t *zh = zilog->zl_header;
    boolean_t claimed = !!zh->zh_claim_txg;
    uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
    uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
    uint64_t max_blk_seq = 0;
    uint64_t max_lr_seq = 0;
    uint64_t blk_count = 0;
    uint64_t lr_count = 0;
    blkptr_t blk, next_blk;
    char *lrbuf, *lrp;
    int error = 0;

    bzero(&next_blk, sizeof (blkptr_t));

    /*
     * Old logs didn't record the maximum zh_claim_lr_seq.
     */
    if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
        claim_lr_seq = UINT64_MAX;

    /*
     * Starting at the block pointed to by zh_log we read the log chain.
     * For each block in the chain we strongly check that block to
     * ensure its validity. We stop when an invalid block is found.
     * For each block pointer in the chain we call parse_blk_func().
     * For each record in each valid block we call parse_lr_func().
     * If the log has been claimed, stop if we encounter a sequence
     * number greater than the highest claimed sequence number.
     */
    lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
    zil_bp_tree_init(zilog);

    for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
        uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
        int reclen;
        char *end;

        if (blk_seq > claim_blk_seq)
            break;
        if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
            break;
        ASSERT3U(max_blk_seq, <, blk_seq);
        max_blk_seq = blk_seq;
        blk_count++;

        if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
            break;

        error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
        if (error != 0)
            break;

        for (lrp = lrbuf; lrp < end; lrp += reclen) {
            lr_t *lr = (lr_t *)lrp;
            reclen = lr->lrc_reclen;
            ASSERT3U(reclen, >=, sizeof (lr_t));
            if (lr->lrc_seq > claim_lr_seq)
                goto done;
            if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
                goto done;
            ASSERT3U(max_lr_seq, <, lr->lrc_seq);
            max_lr_seq = lr->lrc_seq;
            lr_count++;
        }
    }
done:
    zilog->zl_parse_error = error;
    zilog->zl_parse_blk_seq = max_blk_seq;
    zilog->zl_parse_lr_seq = max_lr_seq;
    zilog->zl_parse_blk_count = blk_count;
    zilog->zl_parse_lr_count = lr_count;

    ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
        (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

    zil_bp_tree_fini(zilog);
    zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);

    return (error);
}
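
/*
 * zil_parse() is the single log walker used throughout this file:
 * zil_claim() and zil_check_log_chain() pass the claim callbacks,
 * zil_destroy() passes the free callbacks, and zil_replay() passes
 * zil_incr_blks()/zil_replay_log_record().
 */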
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
    /*
     * Claim log block if not already committed and not already claimed.
     * If tx == NULL, just verify that the block is claimable.
     */
    if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
        return (0);

    return (zio_wait(zio_claim(NULL, zilog->zl_spa,
        tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
        ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
    lr_write_t *lr = (lr_write_t *)lrc;
    int error;

    if (lrc->lrc_txtype != TX_WRITE)
        return (0);

    /*
     * If the block is not readable, don't claim it. This can happen
     * in normal operation when a log block is written to disk before
     * some of the dmu_sync() blocks it points to. In this case, the
     * transaction cannot have been committed to anyone (we would have
     * waited for all writes to be stable first), so it is semantically
     * correct to declare this the end of the log.
     */
    if (lr->lr_blkptr.blk_birth >= first_txg &&
        (error = zil_read_log_data(zilog, lr, NULL)) != 0)
        return (error);

    return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
    zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

    return (0);
}
static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
    lr_write_t *lr = (lr_write_t *)lrc;
    blkptr_t *bp = &lr->lr_blkptr;

    /*
     * If we previously claimed it, we need to free it.
     */
    if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
        bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
        zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

    return (0);
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
    lwb_t *lwb;

    lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
    lwb->lwb_zilog = zilog;
    lwb->lwb_blk = *bp;
    lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
    lwb->lwb_max_txg = txg;
    lwb->lwb_zio = NULL;
    lwb->lwb_tx = NULL;
    if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
        lwb->lwb_nused = sizeof (zil_chain_t);
        lwb->lwb_sz = BP_GET_LSIZE(bp);
    } else {
        lwb->lwb_nused = 0;
        lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
    }

    mutex_enter(&zilog->zl_lock);
    list_insert_tail(&zilog->zl_lwb_list, lwb);
    mutex_exit(&zilog->zl_lock);

    return (lwb);
}
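
/*
 * An lwb (log write buffer) is the in-memory image of one on-disk log block.
 * It is created here when the block is allocated, filled by zil_lwb_commit(),
 * issued by zil_lwb_write_start()/zil_lwb_write_done(), and retired by
 * zil_sync() once the containing txg has made the records stable in the pool.
 */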
/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb = NULL;
    uint64_t txg = 0;
    dmu_tx_t *tx = NULL;
    blkptr_t blk;
    int error = 0;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    ASSERT(zh->zh_claim_txg == 0);
    ASSERT(zh->zh_replay_seq == 0);

    blk = zh->zh_log;

    /*
     * Allocate an initial log block if:
     *    - there isn't one already
     *    - the existing block is the wrong endianness
     */
    if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
        tx = dmu_tx_create(zilog->zl_os);
        VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);

        if (!BP_IS_HOLE(&blk)) {
            zio_free_zil(zilog->zl_spa, txg, &blk);
            BP_ZERO(&blk);
        }

        error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
            ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

        if (error == 0)
            zil_init_log_chain(zilog, &blk);
    }

    /*
     * Allocate a log write buffer (lwb) for the first log block.
     */
    if (error == 0)
        lwb = zil_alloc_lwb(zilog, &blk, txg);

    /*
     * If we just allocated the first log block, commit our transaction
     * and wait for zil_sync() to stuff the block pointer into zh_log.
     * (zh is part of the MOS, so we cannot modify it in open context.)
     */
    if (tx != NULL) {
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

    return (lwb);
}
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    dmu_tx_t *tx;
    uint64_t txg;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    zilog->zl_old_header = *zh;    /* debugging aid */

    if (BP_IS_HOLE(&zh->zh_log))
        return;

    tx = dmu_tx_create(zilog->zl_os);
    VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    txg = dmu_tx_get_txg(tx);

    mutex_enter(&zilog->zl_lock);

    ASSERT3U(zilog->zl_destroy_txg, <, txg);
    zilog->zl_destroy_txg = txg;
    zilog->zl_keep_first = keep_first;

    if (!list_is_empty(&zilog->zl_lwb_list)) {
        ASSERT(zh->zh_claim_txg == 0);
        while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
            list_remove(&zilog->zl_lwb_list, lwb);
            if (lwb->lwb_buf != NULL)
                zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
            zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
            kmem_cache_free(zil_lwb_cache, lwb);
        }
    } else if (!keep_first) {
        (void) zil_parse(zilog, zil_free_log_block,
            zil_free_log_record, tx, zh->zh_claim_txg);
    }
    mutex_exit(&zilog->zl_lock);

    dmu_tx_commit(tx);
}
int
zil_claim(const char *osname, void *txarg)
{
    dmu_tx_t *tx = txarg;
    uint64_t first_txg = dmu_tx_get_txg(tx);
    zilog_t *zilog;
    zil_header_t *zh;
    objset_t *os;
    int error;

    error = dmu_objset_hold(osname, FTAG, &os);
    if (error) {
        cmn_err(CE_WARN, "can't open objset for %s", osname);
        return (0);
    }

    zilog = dmu_objset_zil(os);
    zh = zil_header_in_syncing_context(zilog);

    if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
        if (!BP_IS_HOLE(&zh->zh_log))
            zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
        BP_ZERO(&zh->zh_log);
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
        dmu_objset_rele(os, FTAG);
        return (0);
    }

    /*
     * Claim all log blocks if we haven't already done so, and remember
     * the highest claimed sequence number. This ensures that if we can
     * read only part of the log now (e.g. due to a missing device),
     * but we can read the entire log later, we will not try to replay
     * or destroy beyond the last block we successfully claimed.
     */
    ASSERT3U(zh->zh_claim_txg, <=, first_txg);
    if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
        (void) zil_parse(zilog, zil_claim_log_block,
            zil_claim_log_record, tx, first_txg);
        zh->zh_claim_txg = first_txg;
        zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
        zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
        if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
            zh->zh_flags |= ZIL_REPLAY_NEEDED;
        zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
        dsl_dataset_dirty(dmu_objset_ds(os), tx);
    }

    ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
    dmu_objset_rele(os, FTAG);
    return (0);
}
/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(const char *osname, void *tx)
{
    zilog_t *zilog;
    objset_t *os;
    blkptr_t *bp;
    int error;

    error = dmu_objset_hold(osname, FTAG, &os);
    if (error) {
        cmn_err(CE_WARN, "can't open objset for %s", osname);
        return (0);
    }

    zilog = dmu_objset_zil(os);
    bp = (blkptr_t *)&zilog->zl_header->zh_log;

    /*
     * Check the first block and determine if it's on a log device
     * which may have been removed or faulted prior to loading this
     * pool. If so, there's no point in checking the rest of the log
     * as its content should have already been synced to the pool.
     */
    if (!BP_IS_HOLE(bp)) {
        vdev_t *vd;
        boolean_t valid = B_TRUE;

        spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
        vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
        if (vd->vdev_islog && vdev_is_dead(vd))
            valid = vdev_log_state_valid(vd);
        spa_config_exit(os->os_spa, SCL_STATE, FTAG);

        if (!valid) {
            dmu_objset_rele(os, FTAG);
            return (0);
        }
    }

    /*
     * Because tx == NULL, zil_claim_log_block() will not actually claim
     * any blocks, but just determine whether it is possible to do so.
     * In addition to checking the log chain, zil_claim_log_block()
     * will invoke zio_claim() with a done func of spa_claim_notify(),
     * which will update spa_max_claim_txg. See spa_load() for details.
     */
    error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
        zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

    dmu_objset_rele(os, FTAG);

    return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
static int
zil_vdev_compare(const void *x1, const void *x2)
{
    const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
    const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

    if (v1 < v2)
        return (-1);
    if (v1 > v2)
        return (1);

    return (0);
}
void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
    avl_tree_t *t = &zilog->zl_vdev_tree;
    avl_index_t where;
    zil_vdev_node_t *zv, zvsearch;
    int ndvas = BP_GET_NDVAS(bp);
    int i;

    if (zfs_nocacheflush)
        return;

    ASSERT(zilog->zl_writer);

    /*
     * Even though we're zl_writer, we still need a lock because the
     * zl_get_data() callbacks may have dmu_sync() done callbacks
     * that will run concurrently.
     */
    mutex_enter(&zilog->zl_vdev_lock);
    for (i = 0; i < ndvas; i++) {
        zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
        if (avl_find(t, &zvsearch, &where) == NULL) {
            zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
            zv->zv_vdev = zvsearch.zv_vdev;
            avl_insert(t, zv, where);
        }
    }
    mutex_exit(&zilog->zl_vdev_lock);
}
static void
zil_flush_vdevs(zilog_t *zilog)
{
    spa_t *spa = zilog->zl_spa;
    avl_tree_t *t = &zilog->zl_vdev_tree;
    void *cookie = NULL;
    zil_vdev_node_t *zv;
    zio_t *zio;

    ASSERT(zilog->zl_writer);

    /*
     * We don't need zl_vdev_lock here because we're the zl_writer,
     * and all zl_get_data() callbacks are done.
     */
    if (avl_numnodes(t) == 0)
        return;

    spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

    zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

    while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
        vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
        if (vd != NULL)
            zio_flush(zio, vd);
        kmem_free(zv, sizeof (*zv));
    }

    /*
     * Wait for all the flushes to complete. Not all devices actually
     * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
     */
    (void) zio_wait(zio);

    spa_config_exit(spa, SCL_STATE, FTAG);
}
/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
    lwb_t *lwb = zio->io_private;
    zilog_t *zilog = lwb->lwb_zilog;
    dmu_tx_t *tx = lwb->lwb_tx;

    ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
    ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
    ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
    ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
    ASSERT(!BP_IS_GANG(zio->io_bp));
    ASSERT(!BP_IS_HOLE(zio->io_bp));
    ASSERT(zio->io_bp->blk_fill == 0);

    /*
     * Ensure the lwb buffer pointer is cleared before releasing
     * the txg. If we have had an allocation failure and
     * the txg is waiting to sync then we want zil_sync()
     * to remove the lwb so that it's not picked up as the next new
     * one in zil_commit_writer(). zil_sync() will only remove
     * the lwb if lwb_buf is null.
     */
    zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
    mutex_enter(&zilog->zl_lock);
    lwb->lwb_buf = NULL;
    lwb->lwb_tx = NULL;
    mutex_exit(&zilog->zl_lock);

    /*
     * Now that we've written this log block, we have a stable pointer
     * to the next block in the chain, so it's OK to let the txg in
     * which we allocated the next block sync.
     */
    dmu_tx_commit(tx);
}
/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
    zbookmark_t zb;

    SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
        ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
        lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

    if (zilog->zl_root_zio == NULL) {
        zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
            ZIO_FLAG_CANFAIL);
    }
    if (lwb->lwb_zio == NULL) {
        lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
            0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
            zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
    }
}
/*
 * Define a limited set of intent log block sizes.
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,                /* non TX_WRITE */
    8192+4096,           /* data base */
    32*1024 + 4096,      /* NFS writes */
    UINT64_MAX
};

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
    (((zilog)->zl_cur_used < zil_slog_limit) || \
    ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
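/*
 * Worked example of the bucket selection done in zil_lwb_write_start():
 * a commit needing roughly 20KB (zl_cur_used + sizeof (zil_chain_t)) is
 * larger than the 4KB and 12KB buckets, so it lands in the 36KB bucket;
 * anything past the last finite bucket maps to SPA_MAXBLOCKSIZE.
 */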
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
    lwb_t *nlwb = NULL;
    zil_chain_t *zilc;
    spa_t *spa = zilog->zl_spa;
    blkptr_t *bp;
    dmu_tx_t *tx;
    uint64_t txg;
    uint64_t zil_blksz, wsz;
    int i, error;

    if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
        zilc = (zil_chain_t *)lwb->lwb_buf;
        bp = &zilc->zc_next_blk;
    } else {
        zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
        bp = &zilc->zc_next_blk;
    }

    ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

    /*
     * Allocate the next block and save its address in this block
     * before writing it in order to establish the log chain.
     * Note that if the allocation of nlwb synced before we wrote
     * the block that points at it (lwb), we'd leak it if we crashed.
     * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
     * We dirty the dataset to ensure that zil_sync() will be called
     * to clean up in the event of allocation failure or I/O failure.
     */
    tx = dmu_tx_create(zilog->zl_os);
    VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    txg = dmu_tx_get_txg(tx);

    lwb->lwb_tx = tx;

    /*
     * Log blocks are pre-allocated. Here we select the size of the next
     * block, based on size used in the last block.
     * - first find the smallest bucket that will fit the block from a
     *   limited set of block sizes. This is because it's faster to write
     *   blocks allocated from the same metaslab as they are adjacent or
     *   close.
     * - next find the maximum from the new suggested size and an array of
     *   previous sizes. This lessens a picket fence effect of wrongly
     *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
     *   requests.
     *
     * Note we only write what is used, but we can't just allocate
     * the maximum block size because we can exhaust the available
     * pool log space.
     */
    zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
    for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
        continue;
    zil_blksz = zil_block_buckets[i];
    if (zil_blksz == UINT64_MAX)
        zil_blksz = SPA_MAXBLOCKSIZE;
    zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
    for (i = 0; i < ZIL_PREV_BLKS; i++)
        zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
    zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

    BP_ZERO(bp);
    /* pass the old blkptr in order to spread log blocks across devs */
    error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
        USE_SLOG(zilog));
    if (error == 0) {
        ASSERT3U(bp->blk_birth, ==, txg);
        bp->blk_cksum = lwb->lwb_blk.blk_cksum;
        bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

        /*
         * Allocate a new log write buffer (lwb).
         */
        nlwb = zil_alloc_lwb(zilog, bp, txg);

        /* Record the block for later vdev flushing */
        zil_add_block(zilog, &lwb->lwb_blk);
    }

    if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
        /* For Slim ZIL only write what is used. */
        wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
        ASSERT3U(wsz, <=, lwb->lwb_sz);
        zio_shrink(lwb->lwb_zio, wsz);
    } else {
        wsz = lwb->lwb_sz;
    }

    zilc->zc_nused = lwb->lwb_nused;
    zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

    /*
     * clear unused data for security
     */
    bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

    zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

    /*
     * If there was an allocation failure then nlwb will be null which
     * forces a txg_wait_synced().
     */
    return (nlwb);
}
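
/*
 * The zl_prev_blks[] averaging above means that with an alternating stream
 * of small and large commits (say 2k, 64k, 2k, 64k) the chosen block size
 * stays pinned near the largest recent request instead of oscillating, at
 * the cost of occasionally allocating a block that is mostly unused (only
 * the used portion is written for Slim ZIL blocks).
 */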
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
    lr_t *lrc = &itx->itx_lr; /* common log record */
    lr_write_t *lrw = (lr_write_t *)lrc;
    char *lr_buf;
    uint64_t txg = lrc->lrc_txg;
    uint64_t reclen = lrc->lrc_reclen;
    uint64_t dlen = 0;

    if (lwb == NULL)
        return (NULL);

    ASSERT(lwb->lwb_buf != NULL);

    if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
        dlen = P2ROUNDUP_TYPED(
            lrw->lr_length, sizeof (uint64_t), uint64_t);

    zilog->zl_cur_used += (reclen + dlen);

    zil_lwb_write_init(zilog, lwb);

    /*
     * If this record won't fit in the current log block, start a new one.
     */
    if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
        lwb = zil_lwb_write_start(zilog, lwb);
        if (lwb == NULL)
            return (NULL);
        zil_lwb_write_init(zilog, lwb);
        ASSERT(LWB_EMPTY(lwb));
        if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
            txg_wait_synced(zilog->zl_dmu_pool, txg);
            return (lwb);
        }
    }

    lr_buf = lwb->lwb_buf + lwb->lwb_nused;
    bcopy(lrc, lr_buf, reclen);
    lrc = (lr_t *)lr_buf;
    lrw = (lr_write_t *)lrc;

    /*
     * If it's a write, fetch the data or get its blkptr as appropriate.
     */
    if (lrc->lrc_txtype == TX_WRITE) {
        if (txg > spa_freeze_txg(zilog->zl_spa))
            txg_wait_synced(zilog->zl_dmu_pool, txg);
        if (itx->itx_wr_state != WR_COPIED) {
            char *dbuf;
            int error;

            if (dlen) {
                ASSERT(itx->itx_wr_state == WR_NEED_COPY);
                dbuf = lr_buf + reclen;
                lrw->lr_common.lrc_reclen += dlen;
            } else {
                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                dbuf = NULL;
            }
            error = zilog->zl_get_data(
                itx->itx_private, lrw, dbuf, lwb->lwb_zio);
            if (error == EIO) {
                txg_wait_synced(zilog->zl_dmu_pool, txg);
                return (lwb);
            }
            if (error) {
                ASSERT(error == ENOENT || error == EEXIST ||
                    error == EALREADY);
                return (lwb);
            }
        }
    }

    /*
     * We're actually making an entry, so update lrc_seq to be the
     * log record sequence number. Note that this is generally not
     * equal to the itx sequence number because not all transactions
     * are synchronous, and sometimes spa_sync() gets there first.
     */
    lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
    lwb->lwb_nused += reclen + dlen;
    lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
    ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
    ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

    return (lwb);
}
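
/*
 * For TX_WRITE records, three write states are handled above: WR_COPIED
 * (the data was already copied into the itx when it was created),
 * WR_NEED_COPY (the data is fetched via zl_get_data() and appended directly
 * after the record in the log block), and WR_INDIRECT (zl_get_data() fills
 * in the record's block pointer instead, so only the pointer is logged).
 */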
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
    itx_t *itx;

    lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

    itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize,
        KM_PUSHPAGE | KM_NODEBUG);
    itx->itx_lr.lrc_txtype = txtype;
    itx->itx_lr.lrc_reclen = lrsize;
    itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
    itx->itx_lr.lrc_seq = 0; /* defensive */
    itx->itx_sync = B_TRUE; /* default is synchronous */

    return (itx);
}
void
zil_itx_destroy(itx_t *itx)
{
    kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}
/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
    itx_t *itx;
    list_t *list;
    avl_tree_t *t;
    void *cookie = NULL;
    itx_async_node_t *ian;

    list = &itxs->i_sync_list;
    while ((itx = list_head(list)) != NULL) {
        list_remove(list, itx);
        kmem_free(itx, offsetof(itx_t, itx_lr) +
            itx->itx_lr.lrc_reclen);
    }

    t = &itxs->i_async_tree;
    while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
        list = &ian->ia_list;
        while ((itx = list_head(list)) != NULL) {
            list_remove(list, itx);
            kmem_free(itx, offsetof(itx_t, itx_lr) +
                itx->itx_lr.lrc_reclen);
        }
        list_destroy(list);
        kmem_free(ian, sizeof (itx_async_node_t));
    }
    avl_destroy(t);

    kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
    const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
    const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

    if (o1 < o2)
        return (-1);
    if (o1 > o2)
        return (1);

    return (0);
}
/*
 * Remove all async itx with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
    uint64_t otxg, txg;
    itx_async_node_t *ian;
    avl_tree_t *t;
    avl_index_t where;
    list_t clean_list;
    itx_t *itx;

    list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

    if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
        otxg = ZILTEST_TXG;
    else
        otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

    for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
        itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

        mutex_enter(&itxg->itxg_lock);
        if (itxg->itxg_txg != txg) {
            mutex_exit(&itxg->itxg_lock);
            continue;
        }

        /*
         * Locate the object node and append its list.
         */
        t = &itxg->itxg_itxs->i_async_tree;
        ian = avl_find(t, &oid, &where);
        if (ian != NULL)
            list_move_tail(&clean_list, &ian->ia_list);
        mutex_exit(&itxg->itxg_lock);
    }
    while ((itx = list_head(&clean_list)) != NULL) {
        list_remove(&clean_list, itx);
        kmem_free(itx, offsetof(itx_t, itx_lr) +
            itx->itx_lr.lrc_reclen);
    }
    list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
    uint64_t txg;
    itxg_t *itxg;
    itxs_t *itxs, *clean = NULL;

    /*
     * Object ids can be re-instantiated in the next txg so
     * remove any async transactions to avoid future leaks.
     * This can happen if a fsync occurs on the re-instantiated
     * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
     * the new file data and flushes a write record for the old object.
     */
    if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
        zil_remove_async(zilog, itx->itx_oid);

    /*
     * Ensure the data of a renamed file is committed before the rename.
     */
    if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
        zil_async_to_sync(zilog, itx->itx_oid);

    if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
        txg = ZILTEST_TXG;
    else
        txg = dmu_tx_get_txg(tx);

    itxg = &zilog->zl_itxg[txg & TXG_MASK];
    mutex_enter(&itxg->itxg_lock);
    itxs = itxg->itxg_itxs;
    if (itxg->itxg_txg != txg) {
        if (itxs != NULL) {
            /*
             * The zil_clean callback hasn't got around to cleaning
             * this itxg. Save the itxs for release below.
             * This should be rare.
             */
            atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
            itxg->itxg_sod = 0;
            clean = itxg->itxg_itxs;
        }
        ASSERT(itxg->itxg_sod == 0);
        itxg->itxg_txg = txg;
        itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

        list_create(&itxs->i_sync_list, sizeof (itx_t),
            offsetof(itx_t, itx_node));
        avl_create(&itxs->i_async_tree, zil_aitx_compare,
            sizeof (itx_async_node_t),
            offsetof(itx_async_node_t, ia_node));
    }
    if (itx->itx_sync) {
        list_insert_tail(&itxs->i_sync_list, itx);
        atomic_add_64(&zilog->zl_itx_list_sz, itx->itx_sod);
        itxg->itxg_sod += itx->itx_sod;
    } else {
        avl_tree_t *t = &itxs->i_async_tree;
        uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
        itx_async_node_t *ian;
        avl_index_t where;

        ian = avl_find(t, &foid, &where);
        if (ian == NULL) {
            ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
            list_create(&ian->ia_list, sizeof (itx_t),
                offsetof(itx_t, itx_node));
            ian->ia_foid = foid;
            avl_insert(t, ian, where);
        }
        list_insert_tail(&ian->ia_list, itx);
    }

    itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
    mutex_exit(&itxg->itxg_lock);

    /* Release the old itxs now we've dropped the lock */
    if (clean != NULL)
        zil_itxg_clean(clean);
}
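
/*
 * The zl_itxg[] array is indexed by (txg & TXG_MASK), so at most
 * TXG_CONCURRENT_STATES transaction groups have live itx lists at once.
 * If a slot still holds itxs from an older txg that zil_clean() has not yet
 * processed, the code above detaches the stale itxs_t and frees it only
 * after dropping the itxg lock.
 */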
/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
    itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
    itxs_t *clean_me;

    mutex_enter(&itxg->itxg_lock);
    if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
        mutex_exit(&itxg->itxg_lock);
        return;
    }
    ASSERT3U(itxg->itxg_txg, <=, synced_txg);
    ASSERT(itxg->itxg_txg != 0);
    ASSERT(zilog->zl_clean_taskq != NULL);
    atomic_add_64(&zilog->zl_itx_list_sz, -itxg->itxg_sod);
    itxg->itxg_sod = 0;
    clean_me = itxg->itxg_itxs;
    itxg->itxg_itxs = NULL;
    itxg->itxg_txg = 0;
    mutex_exit(&itxg->itxg_lock);
    /*
     * Preferably start a task queue to free up the old itxs but
     * if taskq_dispatch can't allocate resources to do that then
     * free it in-line. This should be rare. Note, using TQ_SLEEP
     * created a bad performance problem.
     */
    if (taskq_dispatch(zilog->zl_clean_taskq,
        (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
        zil_itxg_clean(clean_me);
}
/*
 * Get the list of itxs to commit into zl_itx_commit_list.
 */
static void
zil_get_commit_list(zilog_t *zilog)
{
    uint64_t otxg, txg;
    list_t *commit_list = &zilog->zl_itx_commit_list;
    uint64_t push_sod = 0;

    if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
        otxg = ZILTEST_TXG;
    else
        otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

    for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
        itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

        mutex_enter(&itxg->itxg_lock);
        if (itxg->itxg_txg != txg) {
            mutex_exit(&itxg->itxg_lock);
            continue;
        }

        list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
        push_sod += itxg->itxg_sod;
        itxg->itxg_sod = 0;

        mutex_exit(&itxg->itxg_lock);
    }
    atomic_add_64(&zilog->zl_itx_list_sz, -push_sod);
}
/*
 * Move the async itxs for a specified object to commit into sync lists.
 */
static void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
    uint64_t otxg, txg;
    itx_async_node_t *ian;
    avl_tree_t *t;
    avl_index_t where;

    if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
        otxg = ZILTEST_TXG;
    else
        otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

    for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
        itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

        mutex_enter(&itxg->itxg_lock);
        if (itxg->itxg_txg != txg) {
            mutex_exit(&itxg->itxg_lock);
            continue;
        }

        /*
         * If a foid is specified then find that node and append its
         * list. Otherwise walk the tree appending all the lists
         * to the sync list. We add to the end rather than the
         * beginning to ensure the create has happened.
         */
        t = &itxg->itxg_itxs->i_async_tree;
        if (foid != 0) {
            ian = avl_find(t, &foid, &where);
            if (ian != NULL) {
                list_move_tail(&itxg->itxg_itxs->i_sync_list,
                    &ian->ia_list);
            }
        } else {
            void *cookie = NULL;

            while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
                list_move_tail(&itxg->itxg_itxs->i_sync_list,
                    &ian->ia_list);
                list_destroy(&ian->ia_list);
                kmem_free(ian, sizeof (itx_async_node_t));
            }
        }
        mutex_exit(&itxg->itxg_lock);
    }
}
static void
zil_commit_writer(zilog_t *zilog)
{
    uint64_t txg;
    itx_t *itx;
    lwb_t *lwb;
    spa_t *spa = zilog->zl_spa;
    int error = 0;

    ASSERT(zilog->zl_root_zio == NULL);

    mutex_exit(&zilog->zl_lock);

    zil_get_commit_list(zilog);

    /*
     * Return if there's nothing to commit before we dirty the fs by
     * calling zil_create().
     */
    if (list_head(&zilog->zl_itx_commit_list) == NULL) {
        mutex_enter(&zilog->zl_lock);
        return;
    }

    if (zilog->zl_suspend) {
        lwb = NULL;
    } else {
        lwb = list_tail(&zilog->zl_lwb_list);
        if (lwb == NULL)
            lwb = zil_create(zilog);
    }

    DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
    while ((itx = list_head(&zilog->zl_itx_commit_list))) {
        txg = itx->itx_lr.lrc_txg;

        if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
            lwb = zil_lwb_commit(zilog, itx, lwb);
        list_remove(&zilog->zl_itx_commit_list, itx);
        kmem_free(itx, offsetof(itx_t, itx_lr)
            + itx->itx_lr.lrc_reclen);
    }
    DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);

    /* write the last block out */
    if (lwb != NULL && lwb->lwb_zio != NULL)
        lwb = zil_lwb_write_start(zilog, lwb);

    zilog->zl_cur_used = 0;

    /*
     * Wait if necessary for the log blocks to be on stable storage.
     */
    if (zilog->zl_root_zio) {
        error = zio_wait(zilog->zl_root_zio);
        zilog->zl_root_zio = NULL;
        zil_flush_vdevs(zilog);
    }

    if (error || lwb == NULL)
        txg_wait_synced(zilog->zl_dmu_pool, 0);

    mutex_enter(&zilog->zl_lock);

    /*
     * Remember the highest committed log sequence number for ztest.
     * We only update this value when all the log writes succeeded,
     * because ztest wants to ASSERT that it got the whole log chain.
     */
    if (error == 0 && lwb != NULL)
        zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
/*
 * Commit zfs transactions to stable storage.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that object or that might reference that object.
 *
 * itxs are committed in batches. In a heavily stressed zil there will be
 * a commit writer thread who is writing out a bunch of itxs to the log
 * for a set of committing threads (cthreads) in the same batch as the writer.
 * Those cthreads are all waiting on the same cv for that batch.
 *
 * There will also be a different and growing batch of threads that are
 * waiting to commit (qthreads). When the committing batch completes
 * a transition occurs such that the cthreads exit and the qthreads become
 * cthreads. One of the new cthreads becomes the writer thread for the
 * batch. Any new threads arriving become new qthreads.
 *
 * Only 2 condition variables are needed and there's no transition
 * between the two cvs needed. They just flip-flop between qthreads
 * and cthreads.
 *
 * Using this scheme we can efficiently wake up only those threads
 * that have been committed.
 */
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
    uint64_t mybatch;

    if (zilog->zl_sync == ZFS_SYNC_DISABLED)
        return;

    /* move the async itxs for the foid to the sync queues */
    zil_async_to_sync(zilog, foid);

    mutex_enter(&zilog->zl_lock);
    mybatch = zilog->zl_next_batch;
    while (zilog->zl_writer) {
        cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
        if (mybatch <= zilog->zl_com_batch) {
            mutex_exit(&zilog->zl_lock);
            return;
        }
    }

    zilog->zl_next_batch++;
    zilog->zl_writer = B_TRUE;
    zil_commit_writer(zilog);
    zilog->zl_com_batch = mybatch;
    zilog->zl_writer = B_FALSE;
    mutex_exit(&zilog->zl_lock);

    /* wake up one thread to become the next writer */
    cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);

    /* wake up all threads waiting for this batch to be committed */
    cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
}
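
/*
 * A typical caller is the fsync/O_DSYNC path: after logging its itxs with
 * zil_itx_assign(), it calls zil_commit(zilog, foid). The first such thread
 * to find zl_writer clear becomes the writer for batch zl_next_batch; later
 * arrivals sleep on zl_cv_batch[batch & 1] and simply return once
 * zl_com_batch has advanced past their batch number.
 */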
/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
    zil_header_t *zh = zil_header_in_syncing_context(zilog);
    uint64_t txg = dmu_tx_get_txg(tx);
    spa_t *spa = zilog->zl_spa;
    uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
    lwb_t *lwb;

    /*
     * We don't zero out zl_destroy_txg, so make sure we don't try
     * to destroy it twice.
     */
    if (spa_sync_pass(spa) != 1)
        return;

    mutex_enter(&zilog->zl_lock);

    ASSERT(zilog->zl_stop_sync == 0);

    if (*replayed_seq != 0) {
        ASSERT(zh->zh_replay_seq < *replayed_seq);
        zh->zh_replay_seq = *replayed_seq;
        *replayed_seq = 0;
    }

    if (zilog->zl_destroy_txg == txg) {
        blkptr_t blk = zh->zh_log;

        ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

        bzero(zh, sizeof (zil_header_t));
        bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

        if (zilog->zl_keep_first) {
            /*
             * If this block was part of log chain that couldn't
             * be claimed because a device was missing during
             * zil_claim(), but that device later returns,
             * then this block could erroneously appear valid.
             * To guard against this, assign a new GUID to the new
             * log chain so it doesn't matter what blk points to.
             */
            zil_init_log_chain(zilog, &blk);
            zh->zh_log = blk;
        }
    }

    while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
        zh->zh_log = lwb->lwb_blk;
        if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
            break;
        list_remove(&zilog->zl_lwb_list, lwb);
        zio_free_zil(spa, txg, &lwb->lwb_blk);
        kmem_cache_free(zil_lwb_cache, lwb);

        /*
         * If we don't have anything left in the lwb list then
         * we've had an allocation failure and we need to zero
         * out the zil_header blkptr so that we don't end
         * up freeing the same block twice.
         */
        if (list_head(&zilog->zl_lwb_list) == NULL)
            BP_ZERO(&zh->zh_log);
    }
    mutex_exit(&zilog->zl_lock);
}
void
zil_init(void)
{
    zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
        sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
    kmem_cache_destroy(zil_lwb_cache);
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
    zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
    zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
    zilog_t *zilog;
    int i;

    zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

    zilog->zl_header = zh_phys;
    zilog->zl_os = os;
    zilog->zl_spa = dmu_objset_spa(os);
    zilog->zl_dmu_pool = dmu_objset_pool(os);
    zilog->zl_destroy_txg = TXG_INITIAL - 1;
    zilog->zl_logbias = dmu_objset_logbias(os);
    zilog->zl_sync = dmu_objset_syncprop(os);
    zilog->zl_next_batch = 1;

    mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

    for (i = 0; i < TXG_SIZE; i++) {
        mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
            MUTEX_DEFAULT, NULL);
    }

    list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
        offsetof(lwb_t, lwb_node));

    list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
        offsetof(itx_t, itx_node));

    mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

    avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
        sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

    cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
    cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
    cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
    cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);

    return (zilog);
}
void
zil_free(zilog_t *zilog)
{
    lwb_t *head_lwb;
    int i;

    zilog->zl_stop_sync = 1;

    /*
     * After zil_close() there should only be one lwb with a buffer.
     */
    head_lwb = list_head(&zilog->zl_lwb_list);
    if (head_lwb) {
        ASSERT(head_lwb == list_tail(&zilog->zl_lwb_list));
        list_remove(&zilog->zl_lwb_list, head_lwb);
        zio_buf_free(head_lwb->lwb_buf, head_lwb->lwb_sz);
        kmem_cache_free(zil_lwb_cache, head_lwb);
    }
    list_destroy(&zilog->zl_lwb_list);

    avl_destroy(&zilog->zl_vdev_tree);
    mutex_destroy(&zilog->zl_vdev_lock);

    ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
    list_destroy(&zilog->zl_itx_commit_list);

    for (i = 0; i < TXG_SIZE; i++) {
        /*
         * It's possible for an itx to be generated that doesn't dirty
         * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
         * callback to remove the entry. We remove those here.
         *
         * Also free up the ziltest itxs.
         */
        if (zilog->zl_itxg[i].itxg_itxs)
            zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
        mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
    }

    mutex_destroy(&zilog->zl_lock);

    cv_destroy(&zilog->zl_cv_writer);
    cv_destroy(&zilog->zl_cv_suspend);
    cv_destroy(&zilog->zl_cv_batch[0]);
    cv_destroy(&zilog->zl_cv_batch[1]);

    kmem_free(zilog, sizeof (zilog_t));
}
/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
    zilog_t *zilog = dmu_objset_zil(os);

    zilog->zl_get_data = get_data;
    zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
        2, 2, TASKQ_PREPOPULATE);

    return (zilog);
}
/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
    lwb_t *tail_lwb;
    uint64_t txg = 0;

    zil_commit(zilog, 0); /* commit all itx */

    /*
     * The lwb_max_txg for the stubby lwb will reflect the last activity
     * for the zil. After a txg_wait_synced() on the txg we know all the
     * callbacks have occurred that may clean the zil. Only then can we
     * destroy the zl_clean_taskq.
     */
    mutex_enter(&zilog->zl_lock);
    tail_lwb = list_tail(&zilog->zl_lwb_list);
    if (tail_lwb != NULL)
        txg = tail_lwb->lwb_max_txg;
    mutex_exit(&zilog->zl_lock);
    if (txg)
        txg_wait_synced(zilog->zl_dmu_pool, txg);

    taskq_destroy(zilog->zl_clean_taskq);
    zilog->zl_clean_taskq = NULL;
    zilog->zl_get_data = NULL;
}
/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;

    mutex_enter(&zilog->zl_lock);
    if (zh->zh_flags & ZIL_REPLAY_NEEDED) {    /* unplayed log */
        mutex_exit(&zilog->zl_lock);
        return (EBUSY);
    }
    if (zilog->zl_suspend++ != 0) {
        /*
         * Someone else already began a suspend.
         * Just wait for them to finish.
         */
        while (zilog->zl_suspending)
            cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
        mutex_exit(&zilog->zl_lock);
        return (0);
    }
    zilog->zl_suspending = B_TRUE;
    mutex_exit(&zilog->zl_lock);

    zil_commit(zilog, 0);

    zil_destroy(zilog, B_FALSE);

    mutex_enter(&zilog->zl_lock);
    zilog->zl_suspending = B_FALSE;
    cv_broadcast(&zilog->zl_cv_suspend);
    mutex_exit(&zilog->zl_lock);

    return (0);
}
void
zil_resume(zilog_t *zilog)
{
    mutex_enter(&zilog->zl_lock);
    ASSERT(zilog->zl_suspend != 0);
    zilog->zl_suspend--;
    mutex_exit(&zilog->zl_lock);
}
typedef struct zil_replay_arg {
    zil_replay_func_t **zr_replay;
    void *zr_arg;
    boolean_t zr_byteswap;
    char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
    char name[MAXNAMELEN];

    zilog->zl_replaying_seq--;    /* didn't actually replay this one */

    dmu_objset_name(zilog->zl_os, name);

    cmn_err(CE_WARN, "ZFS replay transaction error %d, "
        "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
        (u_longlong_t)lr->lrc_seq,
        (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
        (lr->lrc_txtype & TX_CI) ? "CI" : "");

    return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
    zil_replay_arg_t *zr = zra;
    const zil_header_t *zh = zilog->zl_header;
    uint64_t reclen = lr->lrc_reclen;
    uint64_t txtype = lr->lrc_txtype;
    int error = 0;

    zilog->zl_replaying_seq = lr->lrc_seq;

    if (lr->lrc_seq <= zh->zh_replay_seq)    /* already replayed */
        return (0);

    if (lr->lrc_txg < claim_txg)        /* already committed */
        return (0);

    /* Strip case-insensitive bit, still present in log record */
    txtype &= ~TX_CI;

    if (txtype == 0 || txtype >= TX_MAX_TYPE)
        return (zil_replay_error(zilog, lr, EINVAL));

    /*
     * If this record type can be logged out of order, the object
     * (lr_foid) may no longer exist. That's legitimate, not an error.
     */
    if (TX_OOO(txtype)) {
        error = dmu_object_info(zilog->zl_os,
            ((lr_ooo_t *)lr)->lr_foid, NULL);
        if (error == ENOENT || error == EEXIST)
            return (0);
    }

    /*
     * Make a copy of the data so we can revise and extend it.
     */
    bcopy(lr, zr->zr_lr, reclen);

    /*
     * If this is a TX_WRITE with a blkptr, suck in the data.
     */
    if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
        error = zil_read_log_data(zilog, (lr_write_t *)lr,
            zr->zr_lr + reclen);
        if (error)
            return (zil_replay_error(zilog, lr, error));
    }

    /*
     * The log block containing this lr may have been byteswapped
     * so that we can easily examine common fields like lrc_txtype.
     * However, the log is a mix of different record types, and only the
     * replay vectors know how to byteswap their records. Therefore, if
     * the lr was byteswapped, undo it before invoking the replay vector.
     */
    if (zr->zr_byteswap)
        byteswap_uint64_array(zr->zr_lr, reclen);

    /*
     * We must now do two things atomically: replay this log record,
     * and update the log header sequence number to reflect the fact that
     * we did so. At the end of each replay function the sequence number
     * is updated if we are in replay mode.
     */
    error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
    if (error) {
        /*
         * The DMU's dnode layer doesn't see removes until the txg
         * commits, so a subsequent claim can spuriously fail with
         * EEXIST. So if we receive any error we try syncing out
         * any removes then retry the transaction. Note that we
         * specify B_FALSE for byteswap now, so we don't do it twice.
         */
        txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
        error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
        if (error)
            return (zil_replay_error(zilog, lr, error));
    }
    return (0);
}
static int
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
    zilog->zl_replay_blks++;

    return (0);
}
/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
    zilog_t *zilog = dmu_objset_zil(os);
    const zil_header_t *zh = zilog->zl_header;
    zil_replay_arg_t zr;

    if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
        zil_destroy(zilog, B_TRUE);
        return;
    }

    zr.zr_replay = replay_func;
    zr.zr_arg = arg;
    zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
    zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

    /*
     * Wait for in-progress removes to sync before starting replay.
     */
    txg_wait_synced(zilog->zl_dmu_pool, 0);

    zilog->zl_replay = B_TRUE;
    zilog->zl_replay_time = ddi_get_lbolt();
    ASSERT(zilog->zl_replay_blks == 0);
    (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
        zh->zh_claim_txg);
    vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

    zil_destroy(zilog, B_FALSE);
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
    zilog->zl_replay = B_FALSE;
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
    if (zilog->zl_sync == ZFS_SYNC_DISABLED)
        return (B_TRUE);

    if (zilog->zl_replay) {
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
            zilog->zl_replaying_seq;
        return (B_TRUE);
    }

    return (B_FALSE);
}
int
zil_vdev_offline(const char *osname, void *arg)
{
    objset_t *os;
    zilog_t *zilog;
    int error;

    error = dmu_objset_hold(osname, FTAG, &os);
    if (error)
        return (error);

    zilog = dmu_objset_zil(os);
    if (zil_suspend(zilog) != 0)
        error = EEXIST;
    else
        zil_resume(zilog);
    dmu_objset_rele(os, FTAG);

    return (error);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zil_replay_disable, int, 0644);
MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");

module_param(zfs_nocacheflush, int, 0644);
MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
#endif