/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>
#include <sys/dmu_tx.h>

/*
 * The ZFS intent log (ZIL) saves, in memory, transaction records of
 * system calls that change the file system, with enough information
 * to be able to replay them. These records are held until either the
 * DMU transaction group (txg) commits them to the stable pool and
 * they can be discarded, or they are flushed to the stable log (also
 * in the pool) due to an fsync, O_DSYNC, or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 * 	- ZIL header
 * 	- ZIL blocks
 * 	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. The sketch below shows the
 * ZIL structure:
 */

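/*
 * An illustrative sketch (the next-block pointer lives in each log
 * block's trailer, zil_trailer_t):
 *
 *	+------------+      +-----------+-...-+-----------+---------+
 *	| ZIL header | ---> | zil rec 1 |     | zil rec N | trailer | ---> ...
 *	+------------+      +-----------+-...-+-----------+---------+
 */
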
/*
 * This global ZIL switch affects all pools.
 */
int zil_disable = 0;	/* disable intent logging */

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

static kmem_cache_t *zil_lwb_cache;

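/*
 * AVL tree of block DVAs, built while walking a log chain in
 * zil_parse() so that each log block is claimed or freed only once.
 */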
static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
{
	blkptr_t blk = *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_WAIT;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	*abufpp = NULL;

	/*
	 * We shouldn't be doing any scrubbing while we're doing log
	 * replay, so it's OK not to lock.
	 */
	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);

	if (error == 0) {
		char *data = (*abufpp)->b_data;
		uint64_t blksz = BP_GET_LSIZE(bp);
		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
			error = ECKSUM;
		}

		if (error) {
			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
			*abufpp = NULL;
		}
	}

	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);

	return (error);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 * Return the highest sequence number.
 */
uint64_t
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	uint64_t claim_seq = zh->zh_claim_seq;
	uint64_t seq = 0;
	uint64_t max_seq = 0;
	blkptr_t blk = zh->zh_log;
	arc_buf_t *abuf;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	if (BP_IS_HOLE(&blk))
		return (max_seq);

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	for (;;) {
		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

		if (claim_seq != 0 && seq > claim_seq)
			break;

		ASSERT(max_seq < seq);
		max_seq = seq;

		error = zil_read_log_block(zilog, &blk, &abuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL) {
			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
			continue;
		}

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	zil_dva_tree_fini(&zilog->zl_dva_tree);

	return (max_seq);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
		ASSERT(err == 0);
	}
}

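/*
 * Claim the data block, if any, that a TX_WRITE record points to.
 */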
static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * If we don't already have an initial log block, or we have one
	 * but it's the wrong endianness, then allocate one.
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_blk(zilog->zl_spa, &blk, txg);
			BP_ZERO(&blk);
		}

		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
		    NULL, txg);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write buffer (lwb) for the first log block.
	 */
	if (error == 0) {
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_zio = NULL;

		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create(). We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	/*
	 * It is possible for the ZIL to get the previously mounted zilog
	 * structure of the same dataset if it is quickly remounted and the
	 * dbuf eviction has not completed. In this case we can see a
	 * non-empty lwb list and keep_first will be set. We fix this by
	 * clearing keep_first. This will be slower but it's very rare.
	 */
	if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
		keep_first = B_FALSE;

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		ASSERT(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
			kmem_cache_free(zil_lwb_cache, lwb);
		}
	} else {
		if (!keep_first) {
			(void) zil_parse(zilog, zil_free_log_block,
			    zil_free_log_record, tx, zh->zh_claim_txg);
		}
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}

/*
 * Return B_TRUE if the initial log block is not valid.
 */
static boolean_t
zil_empty(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	arc_buf_t *abuf = NULL;

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_TRUE);

	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
		return (B_TRUE);

	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (B_FALSE);
}

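/*
 * Claim this dataset's intent log so that its blocks won't be
 * reallocated before they have been replayed; invoked once per objset
 * (e.g. at pool load time) with the claiming transaction as txarg.
 */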
int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	/*
	 * Record here whether the zil has any records to replay.
	 * If the header block pointer is null or the block points
	 * to the stubby then we know there are no valid log records.
	 * We use the header to store this state as the zilog gets
	 * freed later in dmu_objset_close().
	 * The flags (and the rest of the header fields) are cleared in
	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
	 *
	 * Note, the intent log can be empty but still need the
	 * stubby to be claimed.
	 */
	if (!zil_empty(zilog)) {
		zh->zh_flags |= ZIL_REPLAY_NEEDED;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are OK as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_close(os);
		return (0); /* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_close(os);
	if (error == ECKSUM)
		return (0); /* normal end of chain */
	return (error);
}

static int
zil_vdev_compare(const void *x1, const void *x2)
{
	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}

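/*
 * Note the vdevs backing bp in zl_vdev_tree so that zil_flush_vdevs()
 * can later flush their write caches.
 */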
void
zil_add_block(zilog_t *zilog, blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	ASSERT(zilog->zl_writer);

	/*
	 * Even though we're zl_writer, we still need a lock because the
	 * zl_get_data() callbacks may have dmu_sync() done callbacks
	 * that will run concurrently.
	 */
	mutex_enter(&zilog->zl_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&zilog->zl_vdev_lock);
}

void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete. Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

/*
 * Function called when a log block write completes.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(zio->io_bp->blk_fill == 0);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing
	 * the txg. If we have had an allocation failure and
	 * the txg is waiting to sync then we want zil_sync()
	 * to remove the lwb so that it's not picked up as the next new
	 * one in zil_commit_writer(). zil_sync() will only remove
	 * the lwb if lwb_buf is null.
	 */
	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error)
		zilog->zl_log_error = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);
}

/*
 * Initialize the io for a log block.
 */
static void
zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_t zb;

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];

	if (zilog->zl_root_zio == NULL) {
		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}
	if (lwb->lwb_zio == NULL) {
		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
		    0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz,
		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb);
	}
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * We've just experienced an allocation failure, so we
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced().
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put the new lwb at the end of the log chain.
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * Kick off the write for the old log block.
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}

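/*
 * Copy an intent log record into the current log write buffer (lwb),
 * starting a new block first if the record won't fit. Returns the
 * (possibly new) lwb, or NULL if a block allocation failed.
 */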
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	lr_write_t *lr = (lr_write_t *)lrc;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	uint64_t dlen;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
		dlen = P2ROUNDUP_TYPED(
		    lr->lr_length, sizeof (uint64_t), uint64_t);
	else
		dlen = 0;

	zilog->zl_cur_used += (reclen + dlen);

	zil_lwb_write_init(zilog, lwb);

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_init(zilog, lwb);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			return (lwb);
		}
	}

	/*
	 * Update lrc_seq to be the log record sequence number (see zil.h),
	 * then copy the record to the log buffer.
	 */
	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			/* alignment is guaranteed */
			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
			if (dlen) {
				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
				lr->lr_common.lrc_reclen += dlen;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}
			error = zilog->zl_get_data(
			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
			if (error) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	lwb->lwb_nused += reclen + dlen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

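/*
 * Allocate an in-memory intent log transaction (itx) big enough to
 * hold a log record of lrsize bytes.
 */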
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
	itx->itx_lr.lrc_seq = 0; /* defensive */

	return (itx);
}

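/*
 * Append the itx to the in-memory list and assign it the next itx
 * sequence number; the returned sequence number can later be handed
 * to zil_commit().
 */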
uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_sod;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	mutex_enter(&zilog->zl_lock);
	/* wait for a log writer to finish walking list */
	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	}

	/*
	 * Move the sync'd log transactions to a separate list so we can call
	 * kmem_free without holding the zl_lock.
	 *
	 * There is no need to set zl_writer as we don't drop zl_lock here.
	 */
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		list_insert_tail(&clean_list, itx);
	}
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);

	/* destroy sync'd log transactions */
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	list_destroy(&clean_list);
}

/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}

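/*
 * Push queued itxs into log blocks and wait for them to reach stable
 * storage. Called with zl_lock held; the lock is dropped while the
 * writes are in flight and reacquired before returning.
 */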
static void
zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	uint64_t txg;
	uint64_t commit_seq = 0;
	itx_t *itx, *itx_next = (itx_t *)-1;
	lwb_t *lwb;
	spa_t *spa;

	zilog->zl_writer = B_TRUE;
	ASSERT(zilog->zl_root_zio == NULL);
	spa = zilog->zl_spa;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			/*
			 * Return if there's nothing to flush before we
			 * dirty the fs by calling zil_create().
			 */
			if (list_is_empty(&zilog->zl_itx_list)) {
				zilog->zl_writer = B_FALSE;
				return;
			}
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/* Loop through in-memory log transactions filling log blocks. */
	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
	for (;;) {
		/*
		 * Find the next itx to push:
		 * Push all transactions related to the specified foid, and
		 * all other transactions except TX_WRITE, TX_TRUNCATE,
		 * TX_SETATTR and TX_ACL for all other files.
		 */
		if (itx_next != (itx_t *)-1)
			itx = itx_next;
		else
			itx = list_head(&zilog->zl_itx_list);
		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
			if (foid == 0) /* push all foids? */
				break;
			if (itx->itx_sync) /* push all O_[D]SYNC */
				break;
			switch (itx->itx_lr.lrc_txtype) {
			case TX_SETATTR:
			case TX_WRITE:
			case TX_TRUNCATE:
			case TX_ACL:
				/* lr_foid is same offset for these records */
				if (((lr_write_t *)&itx->itx_lr)->lr_foid
				    != foid) {
					continue; /* skip this record */
				}
			}
			break;
		}
		if (itx == NULL)
			break;

		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
			break;
		}

		/*
		 * Save the next pointer. Even though we soon drop
		 * zl_lock, all threads that may change the list
		 * (another writer or zil_itx_clean) can't do so until
		 * they have zl_writer.
		 */
		itx_next = list_next(&zilog->zl_itx_list, itx);
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_sod;
		mutex_exit(&zilog->zl_lock);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
	}
	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
	/* determine commit sequence number */
	itx = list_head(&zilog->zl_itx_list);
	if (itx)
		commit_seq = itx->itx_lr.lrc_seq;
	else
		commit_seq = zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_zio != NULL)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	/*
	 * Wait if necessary for the log blocks to be on stable storage.
	 */
	if (zilog->zl_root_zio) {
		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
		(void) zio_wait(zilog->zl_root_zio);
		zilog->zl_root_zio = NULL;
		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
		zil_flush_vdevs(zilog);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	}

	mutex_enter(&zilog->zl_lock);
	zilog->zl_writer = B_FALSE;

	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
	zilog->zl_commit_seq = commit_seq;
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 * If foid is 0, push out all transactions; otherwise push only those
 * for that file or those that might have been used to create that file.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
{
	if (zilog == NULL || seq == 0)
		return;

	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */

	while (zilog->zl_writer) {
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
		if (seq < zilog->zl_commit_seq) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
	}
	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
	/* wake up others waiting on the commit */
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

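/*
 * Allocate and initialize an in-core zilog for the given objset,
 * pointing at its on-disk log header.
 */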
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));

	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	avl_destroy(&zilog->zl_vdev_tree);
	mutex_destroy(&zilog->zl_vdev_lock);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_writer);
	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

1338/*
1339 * Open an intent log.
1340 */
1341zilog_t *
1342zil_open(objset_t *os, zil_get_data_t *get_data)
1343{
1344 zilog_t *zilog = dmu_objset_zil(os);
1345
1346 zilog->zl_get_data = get_data;
1347 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1348 2, 2, TASKQ_PREPOPULATE);
1349
1350 return (zilog);
1351}
1352
1353/*
1354 * Close an intent log.
1355 */
1356void
1357zil_close(zilog_t *zilog)
1358{
1359 /*
1360 * If the log isn't already committed, mark the objset dirty
1361 * (so zil_sync() will be called) and wait for that txg to sync.
1362 */
1363 if (!zil_is_committed(zilog)) {
1364 uint64_t txg;
1365 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1366 (void) dmu_tx_assign(tx, TXG_WAIT);
1367 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1368 txg = dmu_tx_get_txg(tx);
1369 dmu_tx_commit(tx);
1370 txg_wait_synced(zilog->zl_dmu_pool, txg);
1371 }
1372
1373 taskq_destroy(zilog->zl_clean_taskq);
1374 zilog->zl_clean_taskq = NULL;
1375 zilog->zl_get_data = NULL;
1376
1377 zil_itx_clean(zilog);
1378 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1379}
1380
/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;

	mutex_enter(&zilog->zl_lock);
	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {	/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	if (zilog->zl_suspend++ != 0) {
		/*
		 * Someone else already began a suspend.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);
		return (0);
	}
	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, 0);

	/*
	 * Wait for any in-flight log writes to complete.
	 */
	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t *zr_os;
	zil_replay_func_t **zr_replay;
	void *zr_arg;
	boolean_t zr_byteswap;
	char *zr_lrbuf;
} zil_replay_arg_t;

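/*
 * Replay a single log record, skipping records that were already
 * committed or replayed. On persistent failure, give up on replay
 * for this dataset and warn.
 */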
static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	char *name;
	int pass, error;

	if (!zilog->zl_replay)			/* giving up */
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error. We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	for (pass = 1; pass <= 2; pass++) {
		zilog->zl_replaying_seq = lr->lrc_seq;
		/* Only byteswap (if needed) on the 1st pass. */
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
		    zr->zr_byteswap && pass == 1);

		if (!error)
			return;

		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error we try syncing out
		 * any removes, then retry the transaction.
		 */
		if (pass == 1)
			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}

bad:
	ASSERT(error);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dmu_objset_name(zr->zr_os, name);
	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
	zilog->zl_replay = B_FALSE;
	kmem_free(name, MAXNAMELEN);
}

/* ARGSUSED */
static void
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = lbolt;
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}

/*
 * Report whether all transactions are committed.
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;
	int ret;

	mutex_enter(&zilog->zl_lock);
	while (zilog->zl_writer)
		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);

	/* recent unpushed intent log transactions? */
	if (!list_is_empty(&zilog->zl_itx_list)) {
		ret = B_FALSE;
		goto out;
	}

	/* intent log never used? */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		ret = B_TRUE;
		goto out;
	}

	/*
	 * More than 1 log buffer means zil_sync() hasn't yet freed
	 * entries after a txg has committed.
	 */
	if (list_next(&zilog->zl_lwb_list, lwb)) {
		ret = B_FALSE;
		goto out;
	}

	ASSERT(zil_empty(zilog));
	ret = B_TRUE;
out:
	cv_broadcast(&zilog->zl_cv_writer);
	mutex_exit(&zilog->zl_lock);
	return (ret);
}

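/*
 * Check whether this dataset's intent log can be suspended and, if so,
 * resume it immediately; presumably used to decide whether a log
 * device can be taken offline. Returns EEXIST if the log is busy.
 */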
/* ARGSUSED */
int
zil_vdev_offline(char *osname, void *arg)
{
	objset_t *os;
	zilog_t *zilog;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error)
		return (error);

	zilog = dmu_objset_zil(os);
	if (zil_suspend(zilog) != 0)
		error = EEXIST;
	else
		zil_resume(zilog);
	dmu_objset_close(os);
	return (error);
}