/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */
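/*
 * A sketch of the chain just described (an editorial illustration; the
 * original Figure X is not reproduced here):
 *
 *	zil_header_t        ZIL block             ZIL block
 *	+-----------+      +--------------+      +--------------+
 *	|  zh_log --+----->| log records  |  +-->| log records  |
 *	+-----------+      | next blkptr -+--+   | next blkptr -+--> ...
 *	                   +--------------+      +--------------+
 */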
/*
 * This global ZIL switch affects all pools
 */
int zil_replay_disable = 0;	/* disable intent logging replay */
/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;
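/*
 * As an illustration (an assumption about typical deployment, not anything
 * enforced by this file): on Solaris-derived systems, tunables such as
 * zil_replay_disable and zfs_nocacheflush are usually set from /etc/system,
 * e.g.:
 *
 *	set zfs:zfs_nocacheflush = 1
 */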
static kmem_cache_t *zil_lwb_cache;

static boolean_t zil_empty(zilog_t *zilog);

#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
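/*
 * Editorial note: LWB_EMPTY() above holds exactly when nothing has been
 * written to the lwb yet -- the unused space (lwb_sz - lwb_nused) still
 * equals the block's capacity net of the zil_chain_t header, whether that
 * header leads the block (ZILOG2) or trails it (old format); see
 * zil_alloc_lwb() for how lwb_nused and lwb_sz are initialized.
 */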
static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva = BP_IDENTITY(bp);
	zil_bp_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
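/*
 * Editorial note: the checksum words seeded above make each log chain
 * self-identifying.  zil_read_log_block() derives the expected checksum
 * of the next block by bumping ZIL_ZC_SEQ on the current one, so a stale
 * or foreign block fails validation and cleanly terminates the chain.
 */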
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = dsl_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential.  The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = ECKSUM;
			} else {
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}

	return (error);
}
/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	uint32_t aflags = ARC_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	return (error);
}
/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);

	return (error);
}
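/*
 * For reference, the (parse_blk_func, parse_lr_func) pairs passed to
 * zil_parse() in this file are (zil_claim_log_block, zil_claim_log_record)
 * for claiming, (zil_free_log_block, zil_free_log_record) for destroying,
 * and (zil_incr_blks, zil_replay_log_record) for replay.
 */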
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (bp->blk_birth < first_txg || zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it.  This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to.  In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}
static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0)
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_zio = NULL;
	lwb->lwb_tx = NULL;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}
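/*
 * Editorial note on the two layouts handled above: with ZIO_CHECKSUM_ZILOG2
 * ("Slim ZIL") the zil_chain_t header leads the block, so lwb_nused starts
 * past it and lwb_sz covers the whole block; with the older checksum the
 * chain structure trails the block, so lwb_sz excludes it instead.
 */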
/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}
/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		(void) zil_parse(zilog, zil_free_log_block,
		    zil_free_log_record, tx, zh->zh_claim_txg);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}
int
zil_claim(const char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_rele(os, FTAG);
	return (0);
}
/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(const char *osname, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg.  See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	dmu_objset_rele(os, FTAG);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}
void
zil_add_block(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}
static void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}
/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}
/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
	}
}
/*
 * Define a limited set of intent log block sizes.
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
    4096,		/* non TX_WRITE */
    8192+4096,		/* data base */
    32*1024 + 4096,	/* NFS writes */
    UINT64_MAX
};

/*
 * Use the slog as long as the logbias is 'latency' and the current commit size
 * is less than the limit or the total list size is less than 2X the limit.
 * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
 */
uint64_t zil_slog_limit = 1024 * 1024;
#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
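/*
 * Worked example (editorial): with roughly 10KB of zl_cur_used, the
 * selection loop in zil_lwb_write_start() below skips the 4KB bucket and
 * stops at the 12KB (8192+4096) bucket; anything larger than the last
 * real bucket falls through to the UINT64_MAX sentinel and is clamped
 * to SPA_MAXBLOCKSIZE.
 */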
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
	    USE_SLOG(zilog));
	if (error == 0) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write buffer (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, txg);

		/* Record the block for later vdev flushing */
		zil_add_block(zilog, &lwb->lwb_blk);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_zio, wsz);
	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lrw = (lr_write_t *)lrc;
	char *lr_buf;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen = 0;

	if (lwb == NULL)
		return (NULL);

	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		if (lwb->lwb_nused + reclen + dlen > lwb->lwb_sz) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrc = (lr_t *)lr_buf;
	lrw = (lr_write_t *)lrc;

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lr_buf + reclen;
				lrw->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lrw, dbuf, lwb->lwb_zio);
			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number.  Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}
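/*
 * Illustrative usage (a hypothetical caller, modeled on zfs_log.c-style
 * consumers; the payload handling is an assumption, not defined here):
 *
 *	itx = zil_itx_create(txtype, sizeof (lr_write_t) + len);
 *	lr = (lr_write_t *)&itx->itx_lr;
 *	... fill in the lr fields and any in-line write payload ...
 *	seq = zil_itx_assign(zilog, itx, tx);
 */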
void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}
uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);
	ASSERT(!zilog->zl_replay);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}

	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		zil_itx_destroy(itx);
	}
	list_destroy(&clean_list);
}
/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_NOSLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}
static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next;
	lwb_t *lwb;
	spa_t *spa;
	int error = 0;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			lwb = zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
		}
	}
	ASSERT(lwb == NULL || lwb->lwb_zio == NULL);

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);

	for (itx = list_head(&zilog->zl_itx_list); itx; itx = itx_next) {
		/*
		 * Save the next pointer.  Even though we drop zl_lock below,
		 * all threads that can remove itx list entries (other writers
		 * and zil_itx_clean()) can't do so until they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);

		/*
		 * Determine whether to push this itx.
		 * Push all transactions related to specified foid and
		 * all other transactions except those that can be logged
		 * out of order (TX_WRITE, TX_TRUNCATE, TX_SETATTR, TX_ACL)
		 * for all other files.
		 *
		 * If foid == 0 (meaning "push all foids") or
		 * itx->itx_sync is set (meaning O_[D]SYNC), push regardless.
		 */
		if (foid != 0 && !itx->itx_sync &&
		    TX_OOO(itx->itx_lr.lrc_txtype) &&
		    ((lr_ooo_t *)&itx->itx_lr)->lr_foid != foid)
			continue; /* skip this record */

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (LWB_EMPTY(lwb)) ||
		    (lwb->lwb_nused + itx->itx_sod > lwb->lwb_sz)))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;

		mutex_exit(&zilog->zl_lock);

		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);

		zil_itx_destroy(itx);

		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq - 1;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		error = zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (error || lwb == NULL)
		txg_wait_synced(zilog->zl_dmu_pool, 0);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;

	/*
	 * Remember the highest committed log sequence number for ztest.
	 * We only update this value when all the log writes succeeded,
	 * because ztest wants to ASSERT that it got the whole log chain.
	 */
	if (error == 0 && lwb != NULL)
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those for that
 * file or that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq <= zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}
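/*
 * Illustrative usage (hypothetical, but modeled on how fsync()-style
 * callers drive this interface): after logging an itx during the system
 * call, a caller makes it stable with
 *
 *	zil_commit(zilog, seq, foid);
 *
 * where seq is the value returned by zil_itx_assign() and foid names the
 * object being synced (0 to push everything).
 */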
/*
 * Report whether all transactions are committed.
 */
static boolean_t
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	boolean_t committed;

	mutex_enter(&zilog->zl_lock);

	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	if (!list_is_empty(&zilog->zl_itx_list))
		committed = B_FALSE;		/* unpushed transactions */
	else if ((lwb = list_head(&zilog->zl_lwb_list)) == NULL)
		committed = B_TRUE;		/* intent log never used */
	else if (list_next(&zilog->zl_lwb_list, lwb) != NULL)
		committed = B_FALSE;		/* zil_sync() not done yet */
	else
		committed = B_TRUE;		/* everything synced */

	mutex_exit(&zilog->zl_lock);
	return (committed);
}
/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_zil(spa, txg, &lwb->lwb_blk);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}
void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
	zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);
	zilog->zl_sync = dmu_objset_syncprop(os);

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}
void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}
/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}
/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}
/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}
void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}
typedef struct zil_replay_arg {
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	boolean_t	zr_byteswap;
	char		*zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
	char name[MAXNAMELEN];

	zilog->zl_replaying_seq--;	/* didn't actually replay this one */

	dmu_objset_name(zilog->zl_os, name);

	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
	    (u_longlong_t)lr->lrc_seq,
	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");

	return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int error = 0;

	zilog->zl_replaying_seq = lr->lrc_seq;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return (0);

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return (0);

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE)
		return (zil_replay_error(zilog, lr, EINVAL));

	/*
	 * If this record type can be logged out of order, the object
	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
	 */
	if (TX_OOO(txtype)) {
		error = dmu_object_info(zilog->zl_os,
		    ((lr_ooo_t *)lr)->lr_foid, NULL);
		if (error == ENOENT || error == EEXIST)
			return (0);
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lr, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		error = zil_read_log_data(zilog, (lr_write_t *)lr,
		    zr->zr_lr + reclen);
		if (error)
			return (zil_replay_error(zilog, lr, error));
	}

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different record types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lr, reclen);

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
	if (error) {
		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error we try syncing out
		 * any removes then retry the transaction.  Note that we
		 * specify B_FALSE for byteswap now, so we don't do it twice.
		 */
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
		if (error)
			return (zil_replay_error(zilog, lr, error));
	}
	return (0);
}
/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;

	return (0);
}
/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}
/* ARGSUSED */
int
zil_vdev_offline(const char *osname, void *arg)
{
	objset_t *os;
	zilog_t *zilog;
	int error;

	error = dmu_objset_hold(osname, FTAG, &os);
	if (error)
		return (error);

	zilog = dmu_objset_zil(os);
	if (zil_suspend(zilog) != 0)
		error = EEXIST;
	else
		zil_resume(zilog);
	dmu_objset_rele(os, FTAG);
	return (error);
}