1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #pragma ident "@(#)zil.c 1.34 08/02/22 SMI"
27
28 #include <sys/zfs_context.h>
29 #include <sys/spa.h>
30 #include <sys/dmu.h>
31 #include <sys/zap.h>
32 #include <sys/arc.h>
33 #include <sys/stat.h>
34 #include <sys/resource.h>
35 #include <sys/zil.h>
36 #include <sys/zil_impl.h>
37 #include <sys/dsl_dataset.h>
38 #include <sys/vdev.h>
39 #include <sys/dmu_tx.h>
40
41 /*
42 * The zfs intent log (ZIL) saves transaction records of system calls
43 * that change the file system in memory with enough information
44 * to be able to replay them. These are stored in memory until
45 * either the DMU transaction group (txg) commits them to the stable pool
46 * and they can be discarded, or they are flushed to the stable log
47 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
48  * requirement. In the event of a panic or power failure, those log
49  * records (transactions) are replayed.
50 *
51 * There is one ZIL per file system. Its on-disk (pool) format consists
52 * of 3 parts:
53 *
54 * - ZIL header
55 * - ZIL blocks
56 * - ZIL records
57 *
58 * A log record holds a system call transaction. Log blocks can
59 * hold many log records and the blocks are chained together.
60 * Each ZIL block contains a block pointer (blkptr_t) to the next
61 * ZIL block in the chain. The ZIL header points to the first
62 * block in the chain. Note there is not a fixed place in the pool
63 * to hold blocks. They are dynamically allocated and freed as
64  * needed from the blocks available. The sketch below shows the ZIL structure:
65 */
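/*
 * Illustrative sketch of the log chain described above:
 *
 *    zil_header_t            ZIL blocks (dynamically allocated)
 *    +---------+     +-------------+     +-------------+
 *    | zh_log  | --> | log records | --> | log records | --> ...
 *    +---------+     | next blkptr |     | next blkptr |
 *                    +-------------+     +-------------+
 */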
66
67 /*
68 * This global ZIL switch affects all pools
69 */
70 int zil_disable = 0; /* disable intent logging */
71
72 /*
73 * Tunable parameter for debugging or performance analysis. Setting
74 * zfs_nocacheflush will cause corruption on power loss if a volatile
75 * out-of-order write cache is enabled.
76 */
77 boolean_t zfs_nocacheflush = B_FALSE;
78
79 static kmem_cache_t *zil_lwb_cache;
80
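/*
 * Comparison function for the DVA avl tree: order by vdev, then by
 * offset within the vdev.
 */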
81 static int
82 zil_dva_compare(const void *x1, const void *x2)
83 {
84 const dva_t *dva1 = x1;
85 const dva_t *dva2 = x2;
86
87 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
88 return (-1);
89 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
90 return (1);
91
92 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
93 return (-1);
94 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
95 return (1);
96
97 return (0);
98 }
99
100 static void
101 zil_dva_tree_init(avl_tree_t *t)
102 {
103 avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
104 offsetof(zil_dva_node_t, zn_node));
105 }
106
107 static void
108 zil_dva_tree_fini(avl_tree_t *t)
109 {
110 zil_dva_node_t *zn;
111 void *cookie = NULL;
112
113 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
114 kmem_free(zn, sizeof (zil_dva_node_t));
115
116 avl_destroy(t);
117 }
118
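/*
 * Add a DVA to the tree; return EEXIST if it is already present.
 * This lets the claim and free callbacks avoid processing the same
 * log block twice.
 */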
119 static int
120 zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
121 {
122 zil_dva_node_t *zn;
123 avl_index_t where;
124
125 if (avl_find(t, dva, &where) != NULL)
126 return (EEXIST);
127
128 zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
129 zn->zn_dva = *dva;
130 avl_insert(t, zn, where);
131
132 return (0);
133 }
134
135 static zil_header_t *
136 zil_header_in_syncing_context(zilog_t *zilog)
137 {
138 return ((zil_header_t *)zilog->zl_header);
139 }
140
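/*
 * Seed the checksum words that identify a new log chain: two random
 * GUID words, the objset id, and an initial sequence number of 1.
 */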
141 static void
142 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
143 {
144 zio_cksum_t *zc = &bp->blk_cksum;
145
146 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
147 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
148 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
149 zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
150 }
151
152 /*
153 * Read a log block, make sure it's valid, and byteswap it if necessary.
154 */
155 static int
156 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
157 {
158 blkptr_t blk = *bp;
159 zbookmark_t zb;
160 uint32_t aflags = ARC_WAIT;
161 int error;
162
163 zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
164 zb.zb_object = 0;
165 zb.zb_level = -1;
166 zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
167
168 *abufpp = NULL;
169
170 error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
171 arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
172 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);
173
174 if (error == 0) {
175 char *data = (*abufpp)->b_data;
176 uint64_t blksz = BP_GET_LSIZE(bp);
177 zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
178 zio_cksum_t cksum = bp->blk_cksum;
179
180 /*
181 * Sequence numbers should be... sequential. The checksum
182 * verifier for the next block should be bp's checksum plus 1.
183 */
184 cksum.zc_word[ZIL_ZC_SEQ]++;
185
186 if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
187 error = ESTALE;
188 else if (BP_IS_HOLE(&ztp->zit_next_blk))
189 error = ENOENT;
190 else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
191 error = EOVERFLOW;
192
193 if (error) {
194 VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
195 *abufpp = NULL;
196 }
197 }
198
199 dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);
200
201 return (error);
202 }
203
204 /*
205 * Parse the intent log, and call parse_func for each valid record within.
206 * Return the highest sequence number.
207 */
208 uint64_t
209 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
210 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
211 {
212 const zil_header_t *zh = zilog->zl_header;
213 uint64_t claim_seq = zh->zh_claim_seq;
214 uint64_t seq = 0;
215 uint64_t max_seq = 0;
216 blkptr_t blk = zh->zh_log;
217 arc_buf_t *abuf;
218 char *lrbuf, *lrp;
219 zil_trailer_t *ztp;
220 int reclen, error;
221
222 if (BP_IS_HOLE(&blk))
223 return (max_seq);
224
225 /*
226 * Starting at the block pointed to by zh_log we read the log chain.
227 * For each block in the chain we strongly check that block to
228 * ensure its validity. We stop when an invalid block is found.
229 * For each block pointer in the chain we call parse_blk_func().
230 * For each record in each valid block we call parse_lr_func().
231 * If the log has been claimed, stop if we encounter a sequence
232 * number greater than the highest claimed sequence number.
233 */
234 zil_dva_tree_init(&zilog->zl_dva_tree);
235 for (;;) {
236 seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
237
238 if (claim_seq != 0 && seq > claim_seq)
239 break;
240
241 ASSERT(max_seq < seq);
242 max_seq = seq;
243
244 error = zil_read_log_block(zilog, &blk, &abuf);
245
246 if (parse_blk_func != NULL)
247 parse_blk_func(zilog, &blk, arg, txg);
248
249 if (error)
250 break;
251
252 lrbuf = abuf->b_data;
253 ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
254 blk = ztp->zit_next_blk;
255
256 if (parse_lr_func == NULL) {
257 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
258 continue;
259 }
260
261 for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
262 lr_t *lr = (lr_t *)lrp;
263 reclen = lr->lrc_reclen;
264 ASSERT3U(reclen, >=, sizeof (lr_t));
265 parse_lr_func(zilog, lr, arg, txg);
266 }
267 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
268 }
269 zil_dva_tree_fini(&zilog->zl_dva_tree);
270
271 return (max_seq);
272 }
273
274 /* ARGSUSED */
275 static void
276 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
277 {
278 spa_t *spa = zilog->zl_spa;
279 int err;
280
281 /*
282 * Claim log block if not already committed and not already claimed.
283 */
284 if (bp->blk_birth >= first_txg &&
285 zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
286 err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
287 ASSERT(err == 0);
288 }
289 }
290
291 static void
292 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
293 {
294 if (lrc->lrc_txtype == TX_WRITE) {
295 lr_write_t *lr = (lr_write_t *)lrc;
296 zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
297 }
298 }
299
300 /* ARGSUSED */
301 static void
302 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
303 {
304 zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
305 }
306
307 static void
308 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
309 {
310 /*
311 * If we previously claimed it, we need to free it.
312 */
313 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
314 lr_write_t *lr = (lr_write_t *)lrc;
315 blkptr_t *bp = &lr->lr_blkptr;
316 if (bp->blk_birth >= claim_txg &&
317 !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
318 (void) arc_free(NULL, zilog->zl_spa,
319 dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
320 }
321 }
322 }
323
324 /*
325 * Create an on-disk intent log.
326 */
327 static void
328 zil_create(zilog_t *zilog)
329 {
330 const zil_header_t *zh = zilog->zl_header;
331 lwb_t *lwb;
332 uint64_t txg = 0;
333 dmu_tx_t *tx = NULL;
334 blkptr_t blk;
335 int error = 0;
336
337 /*
338 * Wait for any previous destroy to complete.
339 */
340 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
341
342 ASSERT(zh->zh_claim_txg == 0);
343 ASSERT(zh->zh_replay_seq == 0);
344
345 blk = zh->zh_log;
346
347 /*
348 * If we don't already have an initial log block, allocate one now.
349 */
350 if (BP_IS_HOLE(&blk)) {
351 tx = dmu_tx_create(zilog->zl_os);
352 (void) dmu_tx_assign(tx, TXG_WAIT);
353 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
354 txg = dmu_tx_get_txg(tx);
355
356 error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
357 NULL, txg);
358
359 if (error == 0)
360 zil_init_log_chain(zilog, &blk);
361 }
362
363 /*
364 * Allocate a log write buffer (lwb) for the first log block.
365 */
366 if (error == 0) {
367 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
368 lwb->lwb_zilog = zilog;
369 lwb->lwb_blk = blk;
370 lwb->lwb_nused = 0;
371 lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
372 lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
373 lwb->lwb_max_txg = txg;
374 lwb->lwb_zio = NULL;
375
376 mutex_enter(&zilog->zl_lock);
377 list_insert_tail(&zilog->zl_lwb_list, lwb);
378 mutex_exit(&zilog->zl_lock);
379 }
380
381 /*
382 * If we just allocated the first log block, commit our transaction
383  * and wait for zil_sync() to stuff the block pointer into zh_log.
384 * (zh is part of the MOS, so we cannot modify it in open context.)
385 */
386 if (tx != NULL) {
387 dmu_tx_commit(tx);
388 txg_wait_synced(zilog->zl_dmu_pool, txg);
389 }
390
391 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
392 }
393
394 /*
395 * In one tx, free all log blocks and clear the log header.
396 * If keep_first is set, then we're replaying a log with no content.
397 * We want to keep the first block, however, so that the first
398 * synchronous transaction doesn't require a txg_wait_synced()
399 * in zil_create(). We don't need to txg_wait_synced() here either
400 * when keep_first is set, because both zil_create() and zil_destroy()
401 * will wait for any in-progress destroys to complete.
402 */
403 void
404 zil_destroy(zilog_t *zilog, boolean_t keep_first)
405 {
406 const zil_header_t *zh = zilog->zl_header;
407 lwb_t *lwb;
408 dmu_tx_t *tx;
409 uint64_t txg;
410
411 /*
412 * Wait for any previous destroy to complete.
413 */
414 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
415
416 if (BP_IS_HOLE(&zh->zh_log))
417 return;
418
419 tx = dmu_tx_create(zilog->zl_os);
420 (void) dmu_tx_assign(tx, TXG_WAIT);
421 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
422 txg = dmu_tx_get_txg(tx);
423
424 mutex_enter(&zilog->zl_lock);
425
426 /*
427  * It is possible for the ZIL to get the previously mounted zilog
428  * structure of the same dataset if it is quickly remounted and dbuf
429  * eviction has not yet completed. In that case we can see a
430  * non-empty lwb list while keep_first is set. We fix this by
431  * clearing keep_first; that path is slower, but it's very rare.
432 */
433 if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
434 keep_first = B_FALSE;
435
436 ASSERT3U(zilog->zl_destroy_txg, <, txg);
437 zilog->zl_destroy_txg = txg;
438 zilog->zl_keep_first = keep_first;
439
440 if (!list_is_empty(&zilog->zl_lwb_list)) {
441 ASSERT(zh->zh_claim_txg == 0);
442 ASSERT(!keep_first);
443 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
444 list_remove(&zilog->zl_lwb_list, lwb);
445 if (lwb->lwb_buf != NULL)
446 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
447 zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
448 kmem_cache_free(zil_lwb_cache, lwb);
449 }
450 } else {
451 if (!keep_first) {
452 (void) zil_parse(zilog, zil_free_log_block,
453 zil_free_log_record, tx, zh->zh_claim_txg);
454 }
455 }
456 mutex_exit(&zilog->zl_lock);
457
458 dmu_tx_commit(tx);
459 }
460
461 /*
462 * zil_rollback_destroy() is only called by the rollback code.
463 * We already have a syncing tx. Rollback has exclusive access to the
464 * dataset, so we don't have to worry about concurrent zil access.
465 * The actual freeing of any log blocks occurs in zil_sync() later in
466 * this txg syncing phase.
467 */
468 void
469 zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx)
470 {
471 const zil_header_t *zh = zilog->zl_header;
472 uint64_t txg;
473
474 if (BP_IS_HOLE(&zh->zh_log))
475 return;
476
477 txg = dmu_tx_get_txg(tx);
478 ASSERT3U(zilog->zl_destroy_txg, <, txg);
479 zilog->zl_destroy_txg = txg;
480 zilog->zl_keep_first = B_FALSE;
481
482 /*
483  * Ensure there's no outstanding ZIL I/O. Either no lwbs at all,
484  * or just the unused one allocated in advance, is OK.
485 */
486 ASSERT(zilog->zl_lwb_list.list_head.list_next ==
487 zilog->zl_lwb_list.list_head.list_prev);
488 (void) zil_parse(zilog, zil_free_log_block, zil_free_log_record,
489 tx, zh->zh_claim_txg);
490 }
491
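/*
 * Claim all blocks in this dataset's intent log (if not already
 * claimed), recording the claim txg and the highest claimed sequence
 * number in the ZIL header.
 */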
492 int
493 zil_claim(char *osname, void *txarg)
494 {
495 dmu_tx_t *tx = txarg;
496 uint64_t first_txg = dmu_tx_get_txg(tx);
497 zilog_t *zilog;
498 zil_header_t *zh;
499 objset_t *os;
500 int error;
501
502 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
503 if (error) {
504 cmn_err(CE_WARN, "can't process intent log for %s", osname);
505 return (0);
506 }
507
508 zilog = dmu_objset_zil(os);
509 zh = zil_header_in_syncing_context(zilog);
510
511 /*
512 * Claim all log blocks if we haven't already done so, and remember
513 * the highest claimed sequence number. This ensures that if we can
514 * read only part of the log now (e.g. due to a missing device),
515 * but we can read the entire log later, we will not try to replay
516 * or destroy beyond the last block we successfully claimed.
517 */
518 ASSERT3U(zh->zh_claim_txg, <=, first_txg);
519 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
520 zh->zh_claim_txg = first_txg;
521 zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
522 zil_claim_log_record, tx, first_txg);
523 dsl_dataset_dirty(dmu_objset_ds(os), tx);
524 }
525
526 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
527 dmu_objset_close(os);
528 return (0);
529 }
530
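/*
 * Comparison function for the vdev avl tree, ordered by vdev id.
 */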
531 static int
532 zil_vdev_compare(const void *x1, const void *x2)
533 {
534 uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
535 uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
536
537 if (v1 < v2)
538 return (-1);
539 if (v1 > v2)
540 return (1);
541
542 return (0);
543 }
544
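/*
 * Remember which vdevs this block's DVAs live on so that their write
 * caches can be flushed later by zil_flush_vdevs().
 */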
545 void
546 zil_add_block(zilog_t *zilog, blkptr_t *bp)
547 {
548 avl_tree_t *t = &zilog->zl_vdev_tree;
549 avl_index_t where;
550 zil_vdev_node_t *zv, zvsearch;
551 int ndvas = BP_GET_NDVAS(bp);
552 int i;
553
554 if (zfs_nocacheflush)
555 return;
556
557 ASSERT(zilog->zl_writer);
558
559 /*
560 * Even though we're zl_writer, we still need a lock because the
561 * zl_get_data() callbacks may have dmu_sync() done callbacks
562 * that will run concurrently.
563 */
564 mutex_enter(&zilog->zl_vdev_lock);
565 for (i = 0; i < ndvas; i++) {
566 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
567 if (avl_find(t, &zvsearch, &where) == NULL) {
568 zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
569 zv->zv_vdev = zvsearch.zv_vdev;
570 avl_insert(t, zv, where);
571 }
572 }
573 mutex_exit(&zilog->zl_vdev_lock);
574 }
575
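/*
 * Issue a cache flush to every vdev that log blocks were written to
 * since the last flush, and wait for the flushes to complete.
 */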
576 void
577 zil_flush_vdevs(zilog_t *zilog)
578 {
579 spa_t *spa = zilog->zl_spa;
580 avl_tree_t *t = &zilog->zl_vdev_tree;
581 void *cookie = NULL;
582 zil_vdev_node_t *zv;
583 zio_t *zio;
584
585 ASSERT(zilog->zl_writer);
586
587 /*
588 * We don't need zl_vdev_lock here because we're the zl_writer,
589 * and all zl_get_data() callbacks are done.
590 */
591 if (avl_numnodes(t) == 0)
592 return;
593
594 spa_config_enter(spa, RW_READER, FTAG);
595
596 zio = zio_root(spa, NULL, NULL,
597 ZIO_FLAG_CONFIG_HELD | ZIO_FLAG_CANFAIL);
598
599 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
600 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
601 if (vd != NULL)
602 zio_flush(zio, vd);
603 kmem_free(zv, sizeof (*zv));
604 }
605
606 /*
607 * Wait for all the flushes to complete. Not all devices actually
608 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
609 */
610 (void) zio_wait(zio);
611
612 spa_config_exit(spa, FTAG);
613 }
614
615 /*
616 * Function called when a log block write completes
617 */
618 static void
619 zil_lwb_write_done(zio_t *zio)
620 {
621 lwb_t *lwb = zio->io_private;
622 zilog_t *zilog = lwb->lwb_zilog;
623
624 /*
625 * Now that we've written this log block, we have a stable pointer
626 * to the next block in the chain, so it's OK to let the txg in
627 * which we allocated the next block sync.
628 */
629 txg_rele_to_sync(&lwb->lwb_txgh);
630
631 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
632 mutex_enter(&zilog->zl_lock);
633 lwb->lwb_buf = NULL;
634 if (zio->io_error)
635 zilog->zl_log_error = B_TRUE;
636 mutex_exit(&zilog->zl_lock);
637 }
638
639 /*
640  * Initialize the I/O for a log block.
641  *
642  * Note, we should not initialize the I/O until we are about
643  * to use it, since zio_rewrite() does a spa_config_enter().
644 */
645 static void
646 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
647 {
648 zbookmark_t zb;
649
650 zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
651 zb.zb_object = 0;
652 zb.zb_level = -1;
653 zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
654
655 if (zilog->zl_root_zio == NULL) {
656 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
657 ZIO_FLAG_CANFAIL);
658 }
659 if (lwb->lwb_zio == NULL) {
660 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
661 ZIO_CHECKSUM_ZILOG, 0, &lwb->lwb_blk, lwb->lwb_buf,
662 lwb->lwb_sz, zil_lwb_write_done, lwb,
663 ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
664 }
665 }
666
667 /*
668 * Start a log block write and advance to the next log block.
669 * Calls are serialized.
670 */
671 static lwb_t *
672 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
673 {
674 lwb_t *nlwb;
675 zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
676 spa_t *spa = zilog->zl_spa;
677 blkptr_t *bp = &ztp->zit_next_blk;
678 uint64_t txg;
679 uint64_t zil_blksz;
680 int error;
681
682 ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));
683
684 /*
685 * Allocate the next block and save its address in this block
686 * before writing it in order to establish the log chain.
687 * Note that if the allocation of nlwb synced before we wrote
688 * the block that points at it (lwb), we'd leak it if we crashed.
689 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
690 */
691 txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
692 txg_rele_to_quiesce(&lwb->lwb_txgh);
693
694 /*
695 * Pick a ZIL blocksize. We request a size that is the
696  * maximum of the previously used size, the current used size, and
697 * the amount waiting in the queue.
698 */
699 zil_blksz = MAX(zilog->zl_prev_used,
700 zilog->zl_cur_used + sizeof (*ztp));
701 zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
702 zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
703 if (zil_blksz > ZIL_MAX_BLKSZ)
704 zil_blksz = ZIL_MAX_BLKSZ;
705
706 BP_ZERO(bp);
707 /* pass the old blkptr in order to spread log blocks across devs */
708 error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
709 if (error) {
710 dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);
711
712 /*
713 * We dirty the dataset to ensure that zil_sync() will
714 * be called to remove this lwb from our zl_lwb_list.
715  * Failing to do so may leave an lwb with a NULL lwb_buf
716 * hanging around on the zl_lwb_list.
717 */
718 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
719 dmu_tx_commit(tx);
720
721 /*
722  * We've just experienced an allocation failure, so we
723  * terminate the current lwb and send it on its way.
724 */
725 ztp->zit_pad = 0;
726 ztp->zit_nused = lwb->lwb_nused;
727 ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
728 zio_nowait(lwb->lwb_zio);
729
730 /*
731  * By returning NULL the caller will call txg_wait_synced().
732 */
733 return (NULL);
734 }
735
736 ASSERT3U(bp->blk_birth, ==, txg);
737 ztp->zit_pad = 0;
738 ztp->zit_nused = lwb->lwb_nused;
739 ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
740 bp->blk_cksum = lwb->lwb_blk.blk_cksum;
741 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
742
743 /*
744 * Allocate a new log write buffer (lwb).
745 */
746 nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
747
748 nlwb->lwb_zilog = zilog;
749 nlwb->lwb_blk = *bp;
750 nlwb->lwb_nused = 0;
751 nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
752 nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
753 nlwb->lwb_max_txg = txg;
754 nlwb->lwb_zio = NULL;
755
756 /*
757 * Put new lwb at the end of the log chain
758 */
759 mutex_enter(&zilog->zl_lock);
760 list_insert_tail(&zilog->zl_lwb_list, nlwb);
761 mutex_exit(&zilog->zl_lock);
762
763 /* Record the block for later vdev flushing */
764 zil_add_block(zilog, &lwb->lwb_blk);
765
766 /*
767 * kick off the write for the old log block
768 */
769 dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
770 ASSERT(lwb->lwb_zio);
771 zio_nowait(lwb->lwb_zio);
772
773 return (nlwb);
774 }
775
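/*
 * Copy an intent log record into the current log write buffer (lwb),
 * fetching write data via zl_get_data() when needed, and start a new
 * log block if the record does not fit. Returns the lwb to use for
 * the next record.
 */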
776 static lwb_t *
777 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
778 {
779 lr_t *lrc = &itx->itx_lr; /* common log record */
780 lr_write_t *lr = (lr_write_t *)lrc;
781 uint64_t txg = lrc->lrc_txg;
782 uint64_t reclen = lrc->lrc_reclen;
783 uint64_t dlen;
784
785 if (lwb == NULL)
786 return (NULL);
787 ASSERT(lwb->lwb_buf != NULL);
788
789 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
790 dlen = P2ROUNDUP_TYPED(
791 lr->lr_length, sizeof (uint64_t), uint64_t);
792 else
793 dlen = 0;
794
795 zilog->zl_cur_used += (reclen + dlen);
796
797 zil_lwb_write_init(zilog, lwb);
798
799 /*
800 * If this record won't fit in the current log block, start a new one.
801 */
802 if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
803 lwb = zil_lwb_write_start(zilog, lwb);
804 if (lwb == NULL)
805 return (NULL);
806 zil_lwb_write_init(zilog, lwb);
807 ASSERT(lwb->lwb_nused == 0);
808 if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
809 txg_wait_synced(zilog->zl_dmu_pool, txg);
810 return (lwb);
811 }
812 }
813
814 /*
815  * Update lrc_seq to be the log record sequence number (see zil.h),
816  * then copy the record to the log buffer.
817 */
818 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
819 bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
820
821 /*
822 * If it's a write, fetch the data or get its blkptr as appropriate.
823 */
824 if (lrc->lrc_txtype == TX_WRITE) {
825 if (txg > spa_freeze_txg(zilog->zl_spa))
826 txg_wait_synced(zilog->zl_dmu_pool, txg);
827 if (itx->itx_wr_state != WR_COPIED) {
828 char *dbuf;
829 int error;
830
831 /* alignment is guaranteed */
832 lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
833 if (dlen) {
834 ASSERT(itx->itx_wr_state == WR_NEED_COPY);
835 dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
836 lr->lr_common.lrc_reclen += dlen;
837 } else {
838 ASSERT(itx->itx_wr_state == WR_INDIRECT);
839 dbuf = NULL;
840 }
841 error = zilog->zl_get_data(
842 itx->itx_private, lr, dbuf, lwb->lwb_zio);
843 if (error) {
844 ASSERT(error == ENOENT || error == EEXIST ||
845 error == EALREADY);
846 return (lwb);
847 }
848 }
849 }
850
851 lwb->lwb_nused += reclen + dlen;
852 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
853 ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
854 ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
855
856 return (lwb);
857 }
858
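/*
 * Allocate an in-memory intent log transaction (itx) of the given
 * type, with room for a log record of lrsize bytes (rounded up to a
 * multiple of 8 bytes).
 */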
859 itx_t *
860 zil_itx_create(uint64_t txtype, size_t lrsize)
861 {
862 itx_t *itx;
863
864 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
865
866 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
867 itx->itx_lr.lrc_txtype = txtype;
868 itx->itx_lr.lrc_reclen = lrsize;
869 itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
870 itx->itx_lr.lrc_seq = 0; /* defensive */
871
872 return (itx);
873 }
874
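/*
 * Assign the itx to the txg of the given dmu tx, append it to the
 * in-memory itx list, and return its log record sequence number.
 */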
875 uint64_t
876 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
877 {
878 uint64_t seq;
879
880 ASSERT(itx->itx_lr.lrc_seq == 0);
881
882 mutex_enter(&zilog->zl_lock);
883 list_insert_tail(&zilog->zl_itx_list, itx);
884 zilog->zl_itx_list_sz += itx->itx_sod;
885 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
886 itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
887 mutex_exit(&zilog->zl_lock);
888
889 return (seq);
890 }
891
892 /*
893 * Free up all in-memory intent log transactions that have now been synced.
894 */
895 static void
896 zil_itx_clean(zilog_t *zilog)
897 {
898 uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
899 uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
900 list_t clean_list;
901 itx_t *itx;
902
903 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
904
905 mutex_enter(&zilog->zl_lock);
906 /* wait for a log writer to finish walking list */
907 while (zilog->zl_writer) {
908 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
909 }
910
911 /*
912 * Move the sync'd log transactions to a separate list so we can call
913 * kmem_free without holding the zl_lock.
914 *
915 * There is no need to set zl_writer as we don't drop zl_lock here
916 */
917 while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
918 itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
919 list_remove(&zilog->zl_itx_list, itx);
920 zilog->zl_itx_list_sz -= itx->itx_sod;
921 list_insert_tail(&clean_list, itx);
922 }
923 cv_broadcast(&zilog->zl_cv_writer);
924 mutex_exit(&zilog->zl_lock);
925
926 /* destroy sync'd log transactions */
927 while ((itx = list_head(&clean_list)) != NULL) {
928 list_remove(&clean_list, itx);
929 kmem_free(itx, offsetof(itx_t, itx_lr)
930 + itx->itx_lr.lrc_reclen);
931 }
932 list_destroy(&clean_list);
933 }
934
935 /*
936 * If there are any in-memory intent log transactions which have now been
937 * synced then start up a taskq to free them.
938 */
939 void
940 zil_clean(zilog_t *zilog)
941 {
942 itx_t *itx;
943
944 mutex_enter(&zilog->zl_lock);
945 itx = list_head(&zilog->zl_itx_list);
946 if ((itx != NULL) &&
947 (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
948 (void) taskq_dispatch(zilog->zl_clean_taskq,
949 (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
950 }
951 mutex_exit(&zilog->zl_lock);
952 }
953
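/*
 * Write queued itx records into log blocks and push them out to
 * stable storage. Called with zl_lock held; the lock is dropped
 * while I/O is issued and reacquired before returning.
 */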
954 void
955 zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
956 {
957 uint64_t txg;
958 uint64_t commit_seq = 0;
959 itx_t *itx, *itx_next = (itx_t *)-1;
960 lwb_t *lwb;
961 spa_t *spa;
962
963 zilog->zl_writer = B_TRUE;
964 zilog->zl_root_zio = NULL;
965 spa = zilog->zl_spa;
966
967 if (zilog->zl_suspend) {
968 lwb = NULL;
969 } else {
970 lwb = list_tail(&zilog->zl_lwb_list);
971 if (lwb == NULL) {
972 /*
973 * Return if there's nothing to flush before we
974 * dirty the fs by calling zil_create()
975 */
976 if (list_is_empty(&zilog->zl_itx_list)) {
977 zilog->zl_writer = B_FALSE;
978 return;
979 }
980 mutex_exit(&zilog->zl_lock);
981 zil_create(zilog);
982 mutex_enter(&zilog->zl_lock);
983 lwb = list_tail(&zilog->zl_lwb_list);
984 }
985 }
986
987 /* Loop through in-memory log transactions filling log blocks. */
988 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
989 for (;;) {
990 /*
991 * Find the next itx to push:
992  * Push all transactions related to the specified foid and all
993 * other transactions except TX_WRITE, TX_TRUNCATE,
994 * TX_SETATTR and TX_ACL for all other files.
995 */
996 if (itx_next != (itx_t *)-1)
997 itx = itx_next;
998 else
999 itx = list_head(&zilog->zl_itx_list);
1000 for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
1001 if (foid == 0) /* push all foids? */
1002 break;
1003 if (itx->itx_sync) /* push all O_[D]SYNC */
1004 break;
1005 switch (itx->itx_lr.lrc_txtype) {
1006 case TX_SETATTR:
1007 case TX_WRITE:
1008 case TX_TRUNCATE:
1009 case TX_ACL:
1010  /* lr_foid is at the same offset for these records */
1011 if (((lr_write_t *)&itx->itx_lr)->lr_foid
1012 != foid) {
1013 continue; /* skip this record */
1014 }
1015 }
1016 break;
1017 }
1018 if (itx == NULL)
1019 break;
1020
1021 if ((itx->itx_lr.lrc_seq > seq) &&
1022 ((lwb == NULL) || (lwb->lwb_nused == 0) ||
1023 (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
1024 break;
1025 }
1026
1027 /*
1028  * Save the next pointer. Even though we soon drop
1029  * zl_lock, all threads that may change the list
1030 * (another writer or zil_itx_clean) can't do so until
1031 * they have zl_writer.
1032 */
1033 itx_next = list_next(&zilog->zl_itx_list, itx);
1034 list_remove(&zilog->zl_itx_list, itx);
1035 zilog->zl_itx_list_sz -= itx->itx_sod;
1036 mutex_exit(&zilog->zl_lock);
1037 txg = itx->itx_lr.lrc_txg;
1038 ASSERT(txg);
1039
1040 if (txg > spa_last_synced_txg(spa) ||
1041 txg > spa_freeze_txg(spa))
1042 lwb = zil_lwb_commit(zilog, itx, lwb);
1043 kmem_free(itx, offsetof(itx_t, itx_lr)
1044 + itx->itx_lr.lrc_reclen);
1045 mutex_enter(&zilog->zl_lock);
1046 }
1047 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1048 /* determine commit sequence number */
1049 itx = list_head(&zilog->zl_itx_list);
1050 if (itx)
1051 commit_seq = itx->itx_lr.lrc_seq;
1052 else
1053 commit_seq = zilog->zl_itx_seq;
1054 mutex_exit(&zilog->zl_lock);
1055
1056 /* write the last block out */
1057 if (lwb != NULL && lwb->lwb_zio != NULL)
1058 lwb = zil_lwb_write_start(zilog, lwb);
1059
1060 zilog->zl_prev_used = zilog->zl_cur_used;
1061 zilog->zl_cur_used = 0;
1062
1063 /*
1064 * Wait if necessary for the log blocks to be on stable storage.
1065 */
1066 if (zilog->zl_root_zio) {
1067 DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
1068 (void) zio_wait(zilog->zl_root_zio);
1069 DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
1070 zil_flush_vdevs(zilog);
1071 }
1072
1073 if (zilog->zl_log_error || lwb == NULL) {
1074 zilog->zl_log_error = 0;
1075 txg_wait_synced(zilog->zl_dmu_pool, 0);
1076 }
1077
1078 mutex_enter(&zilog->zl_lock);
1079 zilog->zl_writer = B_FALSE;
1080
1081 ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
1082 zilog->zl_commit_seq = commit_seq;
1083 }
1084
1085 /*
1086 * Push zfs transactions to stable storage up to the supplied sequence number.
1087  * If foid is 0, push out all transactions; otherwise push only those
1088  * for that file or those that might have been used to create that file.
1089 */
1090 void
1091 zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
1092 {
1093 if (zilog == NULL || seq == 0)
1094 return;
1095
1096 mutex_enter(&zilog->zl_lock);
1097
1098 seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */
1099
1100 while (zilog->zl_writer) {
1101 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1102 if (seq < zilog->zl_commit_seq) {
1103 mutex_exit(&zilog->zl_lock);
1104 return;
1105 }
1106 }
1107 zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
1108 /* wake up others waiting on the commit */
1109 cv_broadcast(&zilog->zl_cv_writer);
1110 mutex_exit(&zilog->zl_lock);
1111 }
1112
1113 /*
1114 * Called in syncing context to free committed log blocks and update log header.
1115 */
1116 void
1117 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1118 {
1119 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1120 uint64_t txg = dmu_tx_get_txg(tx);
1121 spa_t *spa = zilog->zl_spa;
1122 lwb_t *lwb;
1123
1124 mutex_enter(&zilog->zl_lock);
1125
1126 ASSERT(zilog->zl_stop_sync == 0);
1127
1128 zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];
1129
1130 if (zilog->zl_destroy_txg == txg) {
1131 blkptr_t blk = zh->zh_log;
1132
1133 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1134 ASSERT(spa_sync_pass(spa) == 1);
1135
1136 bzero(zh, sizeof (zil_header_t));
1137 bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));
1138
1139 if (zilog->zl_keep_first) {
1140 /*
1141  * If this block was part of a log chain that couldn't
1142 * be claimed because a device was missing during
1143 * zil_claim(), but that device later returns,
1144 * then this block could erroneously appear valid.
1145 * To guard against this, assign a new GUID to the new
1146 * log chain so it doesn't matter what blk points to.
1147 */
1148 zil_init_log_chain(zilog, &blk);
1149 zh->zh_log = blk;
1150 }
1151 }
1152
1153 for (;;) {
1154 lwb = list_head(&zilog->zl_lwb_list);
1155 if (lwb == NULL) {
1156 mutex_exit(&zilog->zl_lock);
1157 return;
1158 }
1159 zh->zh_log = lwb->lwb_blk;
1160 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1161 break;
1162 list_remove(&zilog->zl_lwb_list, lwb);
1163 zio_free_blk(spa, &lwb->lwb_blk, txg);
1164 kmem_cache_free(zil_lwb_cache, lwb);
1165
1166 /*
1167 * If we don't have anything left in the lwb list then
1168 * we've had an allocation failure and we need to zero
1169 * out the zil_header blkptr so that we don't end
1170 * up freeing the same block twice.
1171 */
1172 if (list_head(&zilog->zl_lwb_list) == NULL)
1173 BP_ZERO(&zh->zh_log);
1174 }
1175 mutex_exit(&zilog->zl_lock);
1176 }
1177
1178 void
1179 zil_init(void)
1180 {
1181 zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1182 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1183 }
1184
1185 void
1186 zil_fini(void)
1187 {
1188 kmem_cache_destroy(zil_lwb_cache);
1189 }
1190
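/*
 * Allocate and initialize the in-memory zilog structure for an objset.
 */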
1191 zilog_t *
1192 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1193 {
1194 zilog_t *zilog;
1195
1196 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1197
1198 zilog->zl_header = zh_phys;
1199 zilog->zl_os = os;
1200 zilog->zl_spa = dmu_objset_spa(os);
1201 zilog->zl_dmu_pool = dmu_objset_pool(os);
1202 zilog->zl_destroy_txg = TXG_INITIAL - 1;
1203
1204 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1205
1206 list_create(&zilog->zl_itx_list, sizeof (itx_t),
1207 offsetof(itx_t, itx_node));
1208
1209 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1210 offsetof(lwb_t, lwb_node));
1211
1212 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1213
1214 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1215 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1216
1217 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1218 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1219
1220 return (zilog);
1221 }
1222
1223 void
1224 zil_free(zilog_t *zilog)
1225 {
1226 lwb_t *lwb;
1227
1228 zilog->zl_stop_sync = 1;
1229
1230 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1231 list_remove(&zilog->zl_lwb_list, lwb);
1232 if (lwb->lwb_buf != NULL)
1233 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1234 kmem_cache_free(zil_lwb_cache, lwb);
1235 }
1236 list_destroy(&zilog->zl_lwb_list);
1237
1238 avl_destroy(&zilog->zl_vdev_tree);
1239 mutex_destroy(&zilog->zl_vdev_lock);
1240
1241 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1242 list_destroy(&zilog->zl_itx_list);
1243 mutex_destroy(&zilog->zl_lock);
1244
1245 cv_destroy(&zilog->zl_cv_writer);
1246 cv_destroy(&zilog->zl_cv_suspend);
1247
1248 kmem_free(zilog, sizeof (zilog_t));
1249 }
1250
1251 /*
1252 * return true if the initial log block is not valid
1253 */
1254 static int
1255 zil_empty(zilog_t *zilog)
1256 {
1257 const zil_header_t *zh = zilog->zl_header;
1258 arc_buf_t *abuf = NULL;
1259
1260 if (BP_IS_HOLE(&zh->zh_log))
1261 return (1);
1262
1263 if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
1264 return (1);
1265
1266 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
1267 return (0);
1268 }
1269
1270 /*
1271 * Open an intent log.
1272 */
1273 zilog_t *
1274 zil_open(objset_t *os, zil_get_data_t *get_data)
1275 {
1276 zilog_t *zilog = dmu_objset_zil(os);
1277
1278 zilog->zl_get_data = get_data;
1279 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1280 2, 2, TASKQ_PREPOPULATE);
1281
1282 return (zilog);
1283 }
1284
1285 /*
1286 * Close an intent log.
1287 */
1288 void
1289 zil_close(zilog_t *zilog)
1290 {
1291 /*
1292 * If the log isn't already committed, mark the objset dirty
1293 * (so zil_sync() will be called) and wait for that txg to sync.
1294 */
1295 if (!zil_is_committed(zilog)) {
1296 uint64_t txg;
1297 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1298 (void) dmu_tx_assign(tx, TXG_WAIT);
1299 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1300 txg = dmu_tx_get_txg(tx);
1301 dmu_tx_commit(tx);
1302 txg_wait_synced(zilog->zl_dmu_pool, txg);
1303 }
1304
1305 taskq_destroy(zilog->zl_clean_taskq);
1306 zilog->zl_clean_taskq = NULL;
1307 zilog->zl_get_data = NULL;
1308
1309 zil_itx_clean(zilog);
1310 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1311 }
1312
1313 /*
1314 * Suspend an intent log. While in suspended mode, we still honor
1315 * synchronous semantics, but we rely on txg_wait_synced() to do it.
1316 * We suspend the log briefly when taking a snapshot so that the snapshot
1317 * contains all the data it's supposed to, and has an empty intent log.
1318 */
1319 int
1320 zil_suspend(zilog_t *zilog)
1321 {
1322 const zil_header_t *zh = zilog->zl_header;
1323
1324 mutex_enter(&zilog->zl_lock);
1325 if (zh->zh_claim_txg != 0) { /* unplayed log */
1326 mutex_exit(&zilog->zl_lock);
1327 return (EBUSY);
1328 }
1329 if (zilog->zl_suspend++ != 0) {
1330 /*
1331 * Someone else already began a suspend.
1332 * Just wait for them to finish.
1333 */
1334 while (zilog->zl_suspending)
1335 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1336 ASSERT(BP_IS_HOLE(&zh->zh_log));
1337 mutex_exit(&zilog->zl_lock);
1338 return (0);
1339 }
1340 zilog->zl_suspending = B_TRUE;
1341 mutex_exit(&zilog->zl_lock);
1342
1343 zil_commit(zilog, UINT64_MAX, 0);
1344
1345 /*
1346 * Wait for any in-flight log writes to complete.
1347 */
1348 mutex_enter(&zilog->zl_lock);
1349 while (zilog->zl_writer)
1350 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1351 mutex_exit(&zilog->zl_lock);
1352
1353 zil_destroy(zilog, B_FALSE);
1354
1355 mutex_enter(&zilog->zl_lock);
1356 zilog->zl_suspending = B_FALSE;
1357 cv_broadcast(&zilog->zl_cv_suspend);
1358 mutex_exit(&zilog->zl_lock);
1359
1360 return (0);
1361 }
1362
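/*
 * Release a suspend reference taken by zil_suspend().
 */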
1363 void
1364 zil_resume(zilog_t *zilog)
1365 {
1366 mutex_enter(&zilog->zl_lock);
1367 ASSERT(zilog->zl_suspend != 0);
1368 zilog->zl_suspend--;
1369 mutex_exit(&zilog->zl_lock);
1370 }
1371
1372 typedef struct zil_replay_arg {
1373 objset_t *zr_os;
1374 zil_replay_func_t **zr_replay;
1375 void *zr_arg;
1376 uint64_t *zr_txgp;
1377 boolean_t zr_byteswap;
1378 char *zr_lrbuf;
1379 } zil_replay_arg_t;
1380
1381 static void
1382 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1383 {
1384 zil_replay_arg_t *zr = zra;
1385 const zil_header_t *zh = zilog->zl_header;
1386 uint64_t reclen = lr->lrc_reclen;
1387 uint64_t txtype = lr->lrc_txtype;
1388 char *name;
1389 int pass, error, sunk;
1390
1391 if (zilog->zl_stop_replay)
1392 return;
1393
1394 if (lr->lrc_txg < claim_txg) /* already committed */
1395 return;
1396
1397 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
1398 return;
1399
1400 /* Strip case-insensitive bit, still present in log record */
1401 txtype &= ~TX_CI;
1402
1403 /*
1404 * Make a copy of the data so we can revise and extend it.
1405 */
1406 bcopy(lr, zr->zr_lrbuf, reclen);
1407
1408 /*
1409 * The log block containing this lr may have been byteswapped
1410 * so that we can easily examine common fields like lrc_txtype.
1411 * However, the log is a mix of different data types, and only the
1412 * replay vectors know how to byteswap their records. Therefore, if
1413 * the lr was byteswapped, undo it before invoking the replay vector.
1414 */
1415 if (zr->zr_byteswap)
1416 byteswap_uint64_array(zr->zr_lrbuf, reclen);
1417
1418 /*
1419 * If this is a TX_WRITE with a blkptr, suck in the data.
1420 */
1421 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1422 lr_write_t *lrw = (lr_write_t *)lr;
1423 blkptr_t *wbp = &lrw->lr_blkptr;
1424 uint64_t wlen = lrw->lr_length;
1425 char *wbuf = zr->zr_lrbuf + reclen;
1426
1427 if (BP_IS_HOLE(wbp)) { /* compressed to a hole */
1428 bzero(wbuf, wlen);
1429 } else {
1430 /*
1431 * A subsequent write may have overwritten this block,
1432  * in which case wbp may have been freed and
1433 * reallocated, and our read of wbp may fail with a
1434 * checksum error. We can safely ignore this because
1435 * the later write will provide the correct data.
1436 */
1437 zbookmark_t zb;
1438
1439 zb.zb_objset = dmu_objset_id(zilog->zl_os);
1440 zb.zb_object = lrw->lr_foid;
1441 zb.zb_level = -1;
1442 zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);
1443
1444 (void) zio_wait(zio_read(NULL, zilog->zl_spa,
1445 wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
1446 ZIO_PRIORITY_SYNC_READ,
1447 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1448 (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
1449 }
1450 }
1451
1452 /*
1453 * We must now do two things atomically: replay this log record,
1454 * and update the log header to reflect the fact that we did so.
1455 * We use the DMU's ability to assign into a specific txg to do this.
1456 */
1457 for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
1458 uint64_t replay_txg;
1459 dmu_tx_t *replay_tx;
1460
1461 replay_tx = dmu_tx_create(zr->zr_os);
1462 error = dmu_tx_assign(replay_tx, TXG_WAIT);
1463 if (error) {
1464 dmu_tx_abort(replay_tx);
1465 break;
1466 }
1467
1468 replay_txg = dmu_tx_get_txg(replay_tx);
1469
1470 if (txtype == 0 || txtype >= TX_MAX_TYPE) {
1471 error = EINVAL;
1472 } else {
1473 /*
1474 * On the first pass, arrange for the replay vector
1475 * to fail its dmu_tx_assign(). That's the only way
1476 * to ensure that those code paths remain well tested.
1477 *
1478 * Only byteswap (if needed) on the 1st pass.
1479 */
1480 *zr->zr_txgp = replay_txg - (pass == 1);
1481 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
1482 zr->zr_byteswap && pass == 1);
1483 *zr->zr_txgp = TXG_NOWAIT;
1484 }
1485
1486 if (error == 0) {
1487 dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
1488 zilog->zl_replay_seq[replay_txg & TXG_MASK] =
1489 lr->lrc_seq;
1490 }
1491
1492 dmu_tx_commit(replay_tx);
1493
1494 if (!error)
1495 return;
1496
1497 /*
1498 * The DMU's dnode layer doesn't see removes until the txg
1499 * commits, so a subsequent claim can spuriously fail with
1500  * EEXIST. If we receive any error other than ERESTART,
1501  * we sync out any pending removes and then retry the
1502  * transaction.
1503 */
1504 if (error != ERESTART && !sunk) {
1505 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1506 sunk = B_TRUE;
1507 continue; /* retry */
1508 }
1509
1510 if (error != ERESTART)
1511 break;
1512
1513 if (pass != 1)
1514 txg_wait_open(spa_get_dsl(zilog->zl_spa),
1515 replay_txg + 1);
1516
1517 dprintf("pass %d, retrying\n", pass);
1518 }
1519
1520 ASSERT(error && error != ERESTART);
1521 name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1522 dmu_objset_name(zr->zr_os, name);
1523 cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1524 "dataset %s, seq 0x%llx, txtype %llu %s\n",
1525 error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
1526 (lr->lrc_txtype & TX_CI) ? "CI" : "");
1527 zilog->zl_stop_replay = 1;
1528 kmem_free(name, MAXNAMELEN);
1529 }
1530
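/*
 * zil_parse() block callback used during replay to count log blocks.
 */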
1531 /* ARGSUSED */
1532 static void
1533 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1534 {
1535 zilog->zl_replay_blks++;
1536 }
1537
1538 /*
1539 * If this dataset has a non-empty intent log, replay it and destroy it.
1540 */
1541 void
1542 zil_replay(objset_t *os, void *arg, uint64_t *txgp,
1543 zil_replay_func_t *replay_func[TX_MAX_TYPE])
1544 {
1545 zilog_t *zilog = dmu_objset_zil(os);
1546 const zil_header_t *zh = zilog->zl_header;
1547 zil_replay_arg_t zr;
1548
1549 if (zil_empty(zilog)) {
1550 zil_destroy(zilog, B_TRUE);
1551 return;
1552 }
1553
1554 zr.zr_os = os;
1555 zr.zr_replay = replay_func;
1556 zr.zr_arg = arg;
1557 zr.zr_txgp = txgp;
1558 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1559 zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1560
1561 /*
1562 * Wait for in-progress removes to sync before starting replay.
1563 */
1564 txg_wait_synced(zilog->zl_dmu_pool, 0);
1565
1566 zilog->zl_stop_replay = 0;
1567 zilog->zl_replay_time = lbolt;
1568 ASSERT(zilog->zl_replay_blks == 0);
1569 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
1570 zh->zh_claim_txg);
1571 kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1572
1573 zil_destroy(zilog, B_FALSE);
1574 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1575 }
1576
1577 /*
1578 * Report whether all transactions are committed
1579 */
1580 int
1581 zil_is_committed(zilog_t *zilog)
1582 {
1583 lwb_t *lwb;
1584 int ret;
1585
1586 mutex_enter(&zilog->zl_lock);
1587 while (zilog->zl_writer)
1588 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1589
1590 /* recent unpushed intent log transactions? */
1591 if (!list_is_empty(&zilog->zl_itx_list)) {
1592 ret = B_FALSE;
1593 goto out;
1594 }
1595
1596 /* intent log never used? */
1597 lwb = list_head(&zilog->zl_lwb_list);
1598 if (lwb == NULL) {
1599 ret = B_TRUE;
1600 goto out;
1601 }
1602
1603 /*
1604 * more than 1 log buffer means zil_sync() hasn't yet freed
1605 * entries after a txg has committed
1606 */
1607 if (list_next(&zilog->zl_lwb_list, lwb)) {
1608 ret = B_FALSE;
1609 goto out;
1610 }
1611
1612 ASSERT(zil_empty(zilog));
1613 ret = B_TRUE;
1614 out:
1615 cv_broadcast(&zilog->zl_cv_writer);
1616 mutex_exit(&zilog->zl_lock);
1617 return (ret);
1618 }