/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/dmu_tx.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of a ZIL header, ZIL blocks, and ZIL records.
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */

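/*
 * A rough sketch of the chain just described (illustrative only; see
 * zil_impl.h for the authoritative on-disk layouts). Each log block ends
 * in a zil_trailer_t whose zit_next_blk points at the next block:
 *
 *	zil_header_t          log block               log block
 *	+----------+       +--------------+        +--------------+
 *	| zh_log --+------>| records ...  |    +-->| records ...  |
 *	+----------+       | trailer:     |    |   | trailer:     |
 *	                   | zit_next_blk-+----+   | zit_next_blk-+--> ...
 *	                   +--------------+        +--------------+
 */
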
/*
 * This global ZIL switch affects all pools
 */
int zil_disable = 0;	/* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

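/*
 * Illustrative usage (an assumption about deployment, not taken from this
 * file): on Solaris this tunable would typically be set at boot via
 * /etc/system, e.g.
 *
 *	set zfs:zfs_nocacheflush = 1
 *
 * and should only be considered when every device backing the pool has a
 * nonvolatile (e.g. battery-backed) write cache.
 */
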
static kmem_cache_t *zil_lwb_cache;

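/*
 * AVL support for the per-zilog DVA tree (zl_dva_tree). While a log chain
 * is walked, the claim and free callbacks record each block's DVA here so
 * that a block referenced more than once is only claimed or freed once.
 */
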
static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

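/*
 * Seed a new log chain. The ZIL stores its chaining metadata in the block
 * checksum words: two random GUID words identify the chain, one word names
 * the objset, and the sequence word starts at 1 and increments per block.
 * zil_read_log_block() relies on this: the next block's checksum must equal
 * this block's checksum with the sequence word incremented by one.
 */
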
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	/*
	 * We shouldn't be doing any scrubbing while we're doing log
	 * replay, it's OK to not lock.
	 */
	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
			error = ECKSUM;
		}

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

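/*
 * zil_parse() callbacks used when claiming a log chain: every log block,
 * and every block referenced by a TX_WRITE record, is claimed in the DMU
 * so it cannot be reallocated before the log has been replayed.
 */
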
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, allocate one now.
	 */
	if (BP_IS_HOLE(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	/*
	 * It is possible for the ZIL to get the previously mounted zilog
	 * structure of the same dataset if quickly remounted and the dbuf
	 * eviction has not completed. In this case we can see a non-empty
	 * lwb list and keep_first will be set. We fix this by clearing
	 * keep_first. This will be slower but it's very rare.
	 */
	if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
		keep_first = B_FALSE;

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else if (!keep_first) {
		(void) zil_parse(zilog, zil_free_log_block,
		    zil_free_log_record, tx, zh->zh_claim_txg);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

/*
 * zil_rollback_destroy() is only called by the rollback code.
 * We already have a syncing tx. Rollback has exclusive access to the
 * dataset, so we don't have to worry about concurrent zil access.
 * The actual freeing of any log blocks occurs in zil_sync() later in
 * this txg syncing phase.
 */
void
zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t txg;

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = B_FALSE;

	/*
	 * Ensure there's no outstanding ZIL IO. No lwbs or just the
	 * unused one that was allocated in advance is OK.
	 */
	ASSERT(zilog->zl_lwb_list.list_head.list_next ==
	    zilog->zl_lwb_list.list_head.list_prev);
	(void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
	    tx, zh->zh_claim_txg);
}

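/*
 * Claim a dataset's intent log at pool import/open time. Runs once per
 * dataset, in the first txg of the import.
 */
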
int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are OK as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
static int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_close(os);
		return (0); /* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_close(os);
	if (error == ECKSUM)
		return (0); /* normal end of chain */
	return (error);
}

static int
zil_clear_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	dmu_tx_t *tx;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	zh = zil_header_in_syncing_context(zilog);
	BP_ZERO(&zh->zh_log);
	dsl_dataset_dirty(dmu_objset_ds(os), tx);
	dmu_tx_commit(tx);

	dmu_objset_close(os);
	return (0);
}

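/*
 * AVL support for the per-zilog vdev tree (zl_vdev_tree). While the log
 * writer is active, zil_add_block() records each top-level vdev that a
 * just-written log block landed on; zil_flush_vdevs() later issues one
 * write-cache flush per recorded vdev before zil_commit() returns.
 */
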
static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;
	mutex_exit(&zilog->zl_lock);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf,
		    lwb->lwb_sz, zil_lwb_write_done, lwb,
		    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
	}
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so, may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so we
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced()
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put new lwb at the end of the log chain
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * kick off the write for the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update the lrc_seq, to be the log record sequence number. See zil.h
	 * Then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

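/*
 * In-memory intent log transaction (itx) lifecycle, as used by consumers
 * of this interface (a sketch; the real callers live outside this file,
 * and lr_xx_t stands for the record type of a given txtype):
 *
 *	itx_t *itx = zil_itx_create(txtype, sizeof (lr_xx_t));
 *	... fill in the type-specific log record fields ...
 *	seq = zil_itx_assign(zilog, itx, tx);	queued on zl_itx_list
 *	zil_commit(zilog, seq, foid);		on fsync/O_DSYNC
 *
 * Records are freed either by zil_itx_clean() once their txg has synced,
 * or by zil_commit_writer() after being written to a log block.
 */
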
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0; /* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	itx_t *itx;
	list_t clean_list;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

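/*
 * Write queued itxs out to the stable log. Called with zl_lock held;
 * zl_writer provides the actual exclusion, so the lock can be dropped
 * while log blocks are filled and re-taken afterwards. Only one thread
 * may be the writer at a time; others block in zil_commit() until the
 * commit sequence number covers their request.
 */
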
static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create()
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to specified foid and all
		 * other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid)
					continue; /* skip this record */
			}
			break;
		}
		if (itx == NULL)
			break;

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer. Even though we soon drop
		 * zl_lock all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx != NULL)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0 push out all transactions, otherwise push only those
 * for that file or those that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
		ASSERT(spa_sync_pass(spa) == 1);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * return true if the initial log block is not valid
 */
static int
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (1);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (1);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_claim_txg != 0) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t *zr_os;
	zil_replay_func_t **zr_replay;
	zil_replay_cleaner_t *zr_replay_cleaner;
	void *zr_arg;
	uint64_t *zr_txgp;
	boolean_t zr_byteswap;
	char *zr_lrbuf;
} zil_replay_arg_t;

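/*
 * Replay a single log record: skip records that are already committed or
 * already replayed, byteswap once if needed, then call the appropriate
 * replay vector in a txg of our choosing so that the replay and the update
 * of zl_replay_seq commit atomically.
 */
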
static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error, sunk;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error. We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * Replay of large truncates can end up needing additional txs
	 * and a different txg. If they are nested within the replay tx
	 * as below then a hang is possible. So we do the truncate here
	 * and redo the truncate later (a no-op) and update the sequence
	 * number whilst in the replay tx. Fortunately, it's safe to repeat
	 * a truncate if we crash and the truncate commits. A create over
	 * an existing file will also come in as a TX_TRUNCATE record.
	 *
	 * Note, remove of large files and renames over large files is
	 * handled by putting the deleted object on a stable list
	 * and if necessary force deleting the object outside of the replay
	 * transaction using the zr_replay_cleaner.
	 */
	if (txtype == TX_TRUNCATE) {
		*zr->zr_txgp = TXG_NOWAIT;
		error = zr->zr_replay[TX_TRUNCATE](zr->zr_arg, zr->zr_lrbuf,
		    zr->zr_byteswap);
		if (error)
			goto bad;
		zr->zr_byteswap = 0; /* only byteswap once */
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign(). That's the only way
			 * to ensure that those code paths remain well tested.
			 *
			 * Only byteswap (if needed) on the 1st pass.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap && pass == 1);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error other than ERESTART
		 * we try syncing out any removes then retrying the
		 * transaction.
		 */
		if (error != ERESTART && !sunk) {
			if (zr->zr_replay_cleaner)
				zr->zr_replay_cleaner(zr->zr_arg);
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
			sunk = B_TRUE;
			continue; /* retry */
		}

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

bad:
	ASSERT(error && error != ERESTART);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_stop_replay = 1;
	kmem_free(name, MAXNAMELEN);
}

static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE],
    zil_replay_cleaner_t *replay_cleaner)
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if (zil_empty(zilog)) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_replay_cleaner = replay_cleaner;
	zr.zr_arg = arg;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zilog->zl_replay_time = lbolt;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
}

/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * more than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}