// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
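/*
 * Illustrative only: BLK_AVG(4, 10) == 7. This integer midpoint is what the
 * binary searches below use to halve their block ranges.
 */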

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bp(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (!xlog_verify_bp(log, 0, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
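	/*
	 * Worked example (illustrative sizes): with 4k log sectors on a
	 * 512-byte basic block device, l_sectBBsize = 8. A request for
	 * nbblks = 10 becomes 10 + 8 = 18, rounded up to 24 basic blocks.
	 */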

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
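	/*
	 * Illustrative: with l_sectBBsize = 8 and blk_no = 13, offset is
	 * 13 & 7 = 5, so the caller's data starts 5 basic blocks
	 * (5 * BBSIZE bytes) into the sector-aligned buffer.
	 */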

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_verify_bp(log, blk_no, nbblks)) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
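	/*
	 * Illustrative: a 2-block read at block 13 with 8-block sectors is
	 * widened to a sector-aligned 8-block read at block 8; xlog_align()
	 * then locates the caller's data within that wider buffer.
	 */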

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_verify_bp(log, blk_no, nbblks)) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}

	/*
	 * On v5 supers, a bli could be attached to update the metadata LSN.
	 * Clean it up.
	 */
	if (bp->b_log_item)
		xfs_buf_item_relse(bp);
	ASSERT(bp->b_log_item == NULL);

	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
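	/*
	 * Note 1 << ffs() is only a starting-size heuristic, not a
	 * power-of-2 ceiling: e.g. nbblks = 24 (0b11000) gives ffs() = 4,
	 * so we first try a 16-block buffer and halve from there as needed.
	 */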
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of 1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}
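	/*
	 * Illustrative: XLOG_HEADER_CYCLE_SIZE is currently 32k, so a v2
	 * record with h_size = 64k carries xhdrs = 2 header blocks, while
	 * any h_size <= 32k needs just one.
	 */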

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

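	/*
	 * Wrapped case, illustrative: head_blk = 900 and tail_blk = 100 in a
	 * 1000-block log gives 100 + (1000 - 900) = 200 unused blocks.
	 */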
	return tail_blk + (log->l_logBBsize - head_blk);
}

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
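		/*
		 * Illustrative: with XLOG_MAX_ICLOGS (currently 8) and 32k
		 * iclogs, anything more than BTOBB(8 * 32k) = 512 basic
		 * blocks from the head cannot be a torn in-flight write.
		 */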
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
				&tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
				rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since the record we found could be the last block in
	 * the physical log, we convert to a log block before comparing to the
	 * head_blk.
	 *
	 * Save the current tail lsn so we can pass it to
	 * xlog_clear_stale_blocks() below. We won't want to clear the unmount
	 * record if there is one, so we pass the lsn of the unmount record
	 * rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
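	/*
	 * Illustrative wrap: with rhead_blk = l_logBBsize - 2, hblks = 1 and
	 * a one-basic-block record body, after_umount_blk is
	 * (l_logBBsize - 2) + 1 + 1, which do_mod() wraps to block 0.
	 */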
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EIO;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, bp,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, bp,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
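	/*
	 * xlog_assign_lsn() packs the cycle into the upper 32 bits of the
	 * 64-bit LSN and the block number into the lower 32 bits, e.g.
	 * cycle 2, block 5 yields 0x0000000200000005 (illustrative).
	 */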
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
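		/*
		 * Illustrative: start_block = 13 with 8-block sectors gives
		 * balign = 8, so the first 5 blocks of the buffer hold
		 * preserved on-disk data and new records start at j = 5.
		 */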
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_put_bp:
	xlog_put_bp(bp);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}
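	/*
	 * Illustrative: in a 1000-block log with head_block = 900 and
	 * tail_block = 100 on the same cycle, the first branch gives
	 * tail_distance = 100 + (1000 - 900) = 200; with head_block = 100
	 * and tail_block = 900 one cycle apart, the second branch gives
	 * 900 - 100 = 800.
	 */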
1789
1790 /*
1791 * If the head is right up against the tail, we can't clear
1792 * anything.
1793 */
1794 if (tail_distance <= 0) {
1795 ASSERT(tail_distance == 0);
1796 return 0;
1797 }
1798
1799 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1800 /*
1801 * Take the smaller of the maximum amount of outstanding I/O
1802 * we could have and the distance to the tail to clear out.
1803 * We take the smaller so that we don't overwrite the tail and
1804 * we don't waste all day writing from the head to the tail
1805 * for no reason.
1806 */
1807 max_distance = MIN(max_distance, tail_distance);
1808
1809 if ((head_block + max_distance) <= log->l_logBBsize) {
1810 /*
1811 * We can stomp all the blocks we need to without
1812 * wrapping around the end of the log. Just do it
1813 * in a single write. Use the cycle number of the
1814 * current cycle minus one so that the log will look like:
1815 * n ... | n - 1 ...
1816 */
1817 error = xlog_write_log_records(log, (head_cycle - 1),
1818 head_block, max_distance, tail_cycle,
1819 tail_block);
1820 if (error)
1821 return error;
1822 } else {
1823 /*
1824 * We need to wrap around the end of the physical log in
1825 * order to clear all the blocks. Do it in two separate
1826 * I/Os. The first write should be from the head to the
1827 * end of the physical log, and it should use the current
1828 * cycle number minus one just like above.
1829 */
1830 distance = log->l_logBBsize - head_block;
1831 error = xlog_write_log_records(log, (head_cycle - 1),
1832 head_block, distance, tail_cycle,
1833 tail_block);
1834
1835 if (error)
1836 return error;
1837
1838 /*
1839 * Now write the blocks at the start of the physical log.
1840 * This writes the remainder of the blocks we want to clear.
1841 * It uses the current cycle number since we're now on the
1842 * same cycle as the head so that we get:
1843 * n ... n ... | n - 1 ...
1844 * ^^^^^ blocks we're writing
1845 */
1846 distance = max_distance - (log->l_logBBsize - head_block);
1847 error = xlog_write_log_records(log, head_cycle, 0, distance,
1848 tail_cycle, tail_block);
1849 if (error)
1850 return error;
1851 }
1852
1853 return 0;
1854 }
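/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * distance calculation above reduced to plain arithmetic. "log_size"
 * stands in for log->l_logBBsize.
 */
static inline int
xlog_stale_clear_distance(
	int		head_cycle,
	int		head_block,
	int		tail_cycle,
	int		tail_block,
	int		log_size)
{
	/* Tail physically behind the head: wrap through the end of the log. */
	if (head_cycle == tail_cycle)
		return tail_block + (log_size - head_block);
	/* Head wrapped one cycle ahead of the tail: simple forward distance. */
	return tail_block - head_block;
}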
1855
1856 /******************************************************************************
1857 *
1858 * Log recover routines
1859 *
1860 ******************************************************************************
1861 */
1862
1863 /*
1864 * Sort the log items in the transaction.
1865 *
1866 * The ordering constraints are defined by the inode allocation and unlink
1867 * behaviour. The rules are:
1868 *
1869 * 1. Every item is only logged once in a given transaction. Hence it
1870 * represents the last logged state of the item. Hence ordering is
1871 * dependent on the order in which operations need to be performed so
1872 * that required initial conditions are always met.
1873 *
1874 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1875 * there's nothing to replay from them so we can simply cull them
1876 * from the transaction. However, we can't do that until after we've
1877 * replayed all the other items because they may be dependent on the
1878 * cancelled buffer and replaying the cancelled buffer can remove it
1879 * from the cancelled buffer table. Hence they have to be done last.
1880 *
1881 * 3. Inode allocation buffers must be replayed before inode items that
1882 * read the buffer and replay changes into it. For filesystems using the
1883 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1884 * treated the same as inode allocation buffers as they create and
1885 * initialise the buffers directly.
1886 *
1887 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1888 * This ensures that inodes are completely flushed to the inode buffer
1889 * in a "free" state before we remove the unlinked inode list pointer.
1890 *
1891 * Hence the ordering needs to be inode allocation buffers first, inode items
1892 * second, inode unlink buffers third and cancelled buffers last.
1893 *
1894 * But there's a problem with that - we can't tell an inode allocation buffer
1895 * apart from a regular buffer, so we can't separate them. We can, however,
1896 * tell an inode unlink buffer from the others, and so we can separate them out
1897 * from all the other buffers and move them to last.
1898 *
1899 * Hence, 4 lists, in order from head to tail:
1900 * - buffer_list for all buffers except cancelled/inode unlink buffers
1901 * - item_list for all non-buffer items
1902 * - inode_buffer_list for inode unlink buffers
1903 * - cancel_list for the cancelled buffers
1904 *
1905 * Note that we add objects to the tail of the lists so that first-to-last
1906 * ordering is preserved within the lists. Adding objects to the head of the
1907 * list means when we traverse from the head we walk them in last-to-first
1908 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1909 * but for all other items there may be specific ordering that we need to
1910 * preserve.
1911 */
1912 STATIC int
1913 xlog_recover_reorder_trans(
1914 struct xlog *log,
1915 struct xlog_recover *trans,
1916 int pass)
1917 {
1918 xlog_recover_item_t *item, *n;
1919 int error = 0;
1920 LIST_HEAD(sort_list);
1921 LIST_HEAD(cancel_list);
1922 LIST_HEAD(buffer_list);
1923 LIST_HEAD(inode_buffer_list);
1924 LIST_HEAD(inode_list);
1925
1926 list_splice_init(&trans->r_itemq, &sort_list);
1927 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1928 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1929
1930 switch (ITEM_TYPE(item)) {
1931 case XFS_LI_ICREATE:
1932 list_move_tail(&item->ri_list, &buffer_list);
1933 break;
1934 case XFS_LI_BUF:
1935 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1936 trace_xfs_log_recover_item_reorder_head(log,
1937 trans, item, pass);
1938 list_move(&item->ri_list, &cancel_list);
1939 break;
1940 }
1941 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1942 list_move(&item->ri_list, &inode_buffer_list);
1943 break;
1944 }
1945 list_move_tail(&item->ri_list, &buffer_list);
1946 break;
1947 case XFS_LI_INODE:
1948 case XFS_LI_DQUOT:
1949 case XFS_LI_QUOTAOFF:
1950 case XFS_LI_EFD:
1951 case XFS_LI_EFI:
1952 case XFS_LI_RUI:
1953 case XFS_LI_RUD:
1954 case XFS_LI_CUI:
1955 case XFS_LI_CUD:
1956 case XFS_LI_BUI:
1957 case XFS_LI_BUD:
1958 trace_xfs_log_recover_item_reorder_tail(log,
1959 trans, item, pass);
1960 list_move_tail(&item->ri_list, &inode_list);
1961 break;
1962 default:
1963 xfs_warn(log->l_mp,
1964 "%s: unrecognized type of log operation",
1965 __func__);
1966 ASSERT(0);
1967 /*
1968 * return the remaining items back to the transaction
1969 * item list so they can be freed in caller.
1970 */
1971 if (!list_empty(&sort_list))
1972 list_splice_init(&sort_list, &trans->r_itemq);
1973 error = -EIO;
1974 goto out;
1975 }
1976 }
1977 out:
1978 ASSERT(list_empty(&sort_list));
1979 if (!list_empty(&buffer_list))
1980 list_splice(&buffer_list, &trans->r_itemq);
1981 if (!list_empty(&inode_list))
1982 list_splice_tail(&inode_list, &trans->r_itemq);
1983 if (!list_empty(&inode_buffer_list))
1984 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1985 if (!list_empty(&cancel_list))
1986 list_splice_tail(&cancel_list, &trans->r_itemq);
1987 return error;
1988 }
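/*
 * Worked example: a transaction logged as [BUF, INODE, CANCEL-BUF,
 * INODE-UNLINK-BUF, BUF] is rebuilt above as
 *
 *	head -> BUF, BUF		(buffer_list, spliced to the head)
 *	     -> INODE			(inode_list)
 *	     -> INODE-UNLINK-BUF	(inode_buffer_list)
 *	     -> CANCEL-BUF		(cancel_list, spliced to the tail)
 *
 * The two BUF items keep their relative order because list_move_tail()
 * appends; cancel and inode unlink buffers are prepended via list_move(),
 * so their relative order reverses, which the comment above notes is
 * harmless for those two classes.
 */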
1989
1990 /*
1991 * Build up the table of buf cancel records so that we don't replay
1992 * cancelled data in the second pass. For buffer records that are
1993 * not cancel records, there is nothing to do here so we just return.
1994 *
1995 * If we get a cancel record which is already in the table, this indicates
1996 * that the buffer was cancelled multiple times. In order to ensure
1997 * that during pass 2 we keep the record in the table until we reach its
1998 * last occurrence in the log, we keep a reference count in the cancel
1999 * record in the table to tell us how many times we expect to see this
2000 * record during the second pass.
2001 */
2002 STATIC int
2003 xlog_recover_buffer_pass1(
2004 struct xlog *log,
2005 struct xlog_recover_item *item)
2006 {
2007 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2008 struct list_head *bucket;
2009 struct xfs_buf_cancel *bcp;
2010
2011 /*
2012 * If this isn't a cancel buffer item, then just return.
2013 */
2014 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
2015 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
2016 return 0;
2017 }
2018
2019 /*
2020 * Insert an xfs_buf_cancel record into the hash table.
2021 * If there is already an identical record, bump its reference count.
2022 */
2023 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
2024 list_for_each_entry(bcp, bucket, bc_list) {
2025 if (bcp->bc_blkno == buf_f->blf_blkno &&
2026 bcp->bc_len == buf_f->blf_len) {
2027 bcp->bc_refcount++;
2028 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
2029 return 0;
2030 }
2031 }
2032
2033 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
2034 bcp->bc_blkno = buf_f->blf_blkno;
2035 bcp->bc_len = buf_f->blf_len;
2036 bcp->bc_refcount = 1;
2037 list_add_tail(&bcp->bc_list, bucket);
2038
2039 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
2040 return 0;
2041 }
2042
2043 /*
2044 * Check to see whether the buffer being recovered has a corresponding
2045 * entry in the buffer cancel record table. If it is, return the cancel
2046 * buffer structure to the caller.
2047 */
2048 STATIC struct xfs_buf_cancel *
2049 xlog_peek_buffer_cancelled(
2050 struct xlog *log,
2051 xfs_daddr_t blkno,
2052 uint len,
2053 unsigned short flags)
2054 {
2055 struct list_head *bucket;
2056 struct xfs_buf_cancel *bcp;
2057
2058 if (!log->l_buf_cancel_table) {
2059 /* empty table means no cancelled buffers in the log */
2060 ASSERT(!(flags & XFS_BLF_CANCEL));
2061 return NULL;
2062 }
2063
2064 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2065 list_for_each_entry(bcp, bucket, bc_list) {
2066 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2067 return bcp;
2068 }
2069
2070 /*
2071 * We didn't find a corresponding entry in the table, so return NULL so
2072 * that the buffer is NOT cancelled.
2073 */
2074 ASSERT(!(flags & XFS_BLF_CANCEL));
2075 return NULL;
2076 }
2077
2078 /*
2079 * If the buffer is being cancelled then return 1 so that it will be cancelled,
2080 * otherwise return 0. If the buffer is actually a buffer cancel item
2081 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2082 * table and remove it from the table if this is the last reference.
2083 *
2084 * We remove the cancel record from the table when we encounter its last
2085 * occurrence in the log so that if the same buffer is re-used again after its
2086 * last cancellation we actually replay the changes made at that point.
2087 */
2088 STATIC int
2089 xlog_check_buffer_cancelled(
2090 struct xlog *log,
2091 xfs_daddr_t blkno,
2092 uint len,
2093 unsigned short flags)
2094 {
2095 struct xfs_buf_cancel *bcp;
2096
2097 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2098 if (!bcp)
2099 return 0;
2100
2101 /*
2102 * We've got a match, so return 1 so that the recovery of this buffer
2103 * is cancelled. If this buffer is actually a buffer cancel log
2104 * item, then decrement the refcount on the one in the table and
2105 * remove it if this is the last reference.
2106 */
2107 if (flags & XFS_BLF_CANCEL) {
2108 if (--bcp->bc_refcount == 0) {
2109 list_del(&bcp->bc_list);
2110 kmem_free(bcp);
2111 }
2112 }
2113 return 1;
2114 }
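/*
 * Worked example of the cancel table lifecycle: a buffer cancelled twice
 * in the log gets one table entry with bc_refcount == 2 from pass 1.
 * Each cancel item seen in pass 2 decrements the count here; the entry
 * is freed on the last hit, so any reuse of those blocks logged after
 * the final cancellation is replayed normally.
 */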
2115
2116 /*
2117 * Perform recovery for a buffer full of inodes. In these buffers, the only
2118 * data which should be recovered is that which corresponds to the
2119 * di_next_unlinked pointers in the on disk inode structures. The rest of the
2120 * data for the inodes is always logged through the inodes themselves rather
2121 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2122 *
2123 * The only time when buffers full of inodes are fully recovered is when the
2124 * buffer is full of newly allocated inodes. In this case the buffer will
2125 * not be marked as an inode buffer and so will be sent to
2126 * xlog_recover_do_reg_buffer() below during recovery.
2127 */
2128 STATIC int
2129 xlog_recover_do_inode_buffer(
2130 struct xfs_mount *mp,
2131 xlog_recover_item_t *item,
2132 struct xfs_buf *bp,
2133 xfs_buf_log_format_t *buf_f)
2134 {
2135 int i;
2136 int item_index = 0;
2137 int bit = 0;
2138 int nbits = 0;
2139 int reg_buf_offset = 0;
2140 int reg_buf_bytes = 0;
2141 int next_unlinked_offset;
2142 int inodes_per_buf;
2143 xfs_agino_t *logged_nextp;
2144 xfs_agino_t *buffer_nextp;
2145
2146 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2147
2148 /*
2149 * Post recovery validation only works properly on CRC enabled
2150 * filesystems.
2151 */
2152 if (xfs_sb_version_hascrc(&mp->m_sb))
2153 bp->b_ops = &xfs_inode_buf_ops;
2154
2155 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2156 for (i = 0; i < inodes_per_buf; i++) {
2157 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2158 offsetof(xfs_dinode_t, di_next_unlinked);
2159
2160 while (next_unlinked_offset >=
2161 (reg_buf_offset + reg_buf_bytes)) {
2162 /*
2163 * The next di_next_unlinked field is beyond
2164 * the current logged region. Find the next
2165 * logged region that contains or is beyond
2166 * the current di_next_unlinked field.
2167 */
2168 bit += nbits;
2169 bit = xfs_next_bit(buf_f->blf_data_map,
2170 buf_f->blf_map_size, bit);
2171
2172 /*
2173 * If there are no more logged regions in the
2174 * buffer, then we're done.
2175 */
2176 if (bit == -1)
2177 return 0;
2178
2179 nbits = xfs_contig_bits(buf_f->blf_data_map,
2180 buf_f->blf_map_size, bit);
2181 ASSERT(nbits > 0);
2182 reg_buf_offset = bit << XFS_BLF_SHIFT;
2183 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2184 item_index++;
2185 }
2186
2187 /*
2188 * If the current logged region starts after the current
2189 * di_next_unlinked field, then move on to the next
2190 * di_next_unlinked field.
2191 */
2192 if (next_unlinked_offset < reg_buf_offset)
2193 continue;
2194
2195 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2196 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2197 ASSERT((reg_buf_offset + reg_buf_bytes) <=
2198 BBTOB(bp->b_io_length));
2199
2200 /*
2201 * The current logged region contains a copy of the
2202 * current di_next_unlinked field. Extract its value
2203 * and copy it to the buffer copy.
2204 */
2205 logged_nextp = item->ri_buf[item_index].i_addr +
2206 next_unlinked_offset - reg_buf_offset;
2207 if (unlikely(*logged_nextp == 0)) {
2208 xfs_alert(mp,
2209 "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
2210 "Trying to replay bad (0) inode di_next_unlinked field.",
2211 item, bp);
2212 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2213 XFS_ERRLEVEL_LOW, mp);
2214 return -EFSCORRUPTED;
2215 }
2216
2217 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2218 *buffer_nextp = *logged_nextp;
2219
2220 /*
2221 * If necessary, recalculate the CRC in the on-disk inode. We
2222 * have to leave the inode in a consistent state for whoever
2223 * reads it next....
2224 */
2225 xfs_dinode_calc_crc(mp,
2226 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2227
2228 }
2229
2230 return 0;
2231 }
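/*
 * Illustrative sketch (hypothetical helper): the logged-region walk used
 * above, in isolation. The data map is in XFS_BLF_CHUNK units, so bit
 * positions convert to byte offsets via XFS_BLF_SHIFT.
 */
static void
xlog_walk_logged_regions(
	uint		*data_map,
	uint		map_size)
{
	int		bit = 0;
	int		nbits;

	while ((bit = xfs_next_bit(data_map, map_size, bit)) != -1) {
		nbits = xfs_contig_bits(data_map, map_size, bit);
		/*
		 * This region covers buffer bytes
		 * [bit << XFS_BLF_SHIFT, (bit + nbits) << XFS_BLF_SHIFT).
		 */
		bit += nbits;
	}
}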
2232
2233 /*
2234 * V5 filesystems know the age of the buffer on disk being recovered. We can
2235 * have newer objects on disk than we are replaying, and so for these cases we
2236 * don't want to replay the current change as that will make the buffer contents
2237 * temporarily invalid on disk.
2238 *
2239 * The magic number might not match the buffer type we are going to recover
2240 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
2241 * extract the LSN of the existing object in the buffer based on its current
2242 * magic number. If we don't recognise the magic number in the buffer, then
2243 * return an LSN of -1 so that the caller knows it was an unrecognised block and
2244 * so can recover the buffer.
2245 *
2246 * Note: we cannot rely solely on magic number matches to determine that the
2247 * buffer has a valid LSN - we also need to verify that it belongs to this
2248 * filesystem, so we need to extract the object's LSN and compare it to that
2249 * which we read from the superblock. If the UUIDs don't match, then we've got a
2250 * stale metadata block from an old filesystem instance that we need to recover
2251 * over the top of.
2252 */
2253 static xfs_lsn_t
2254 xlog_recover_get_buf_lsn(
2255 struct xfs_mount *mp,
2256 struct xfs_buf *bp)
2257 {
2258 uint32_t magic32;
2259 uint16_t magic16;
2260 uint16_t magicda;
2261 void *blk = bp->b_addr;
2262 uuid_t *uuid;
2263 xfs_lsn_t lsn = -1;
2264
2265 /* v4 filesystems always recover immediately */
2266 if (!xfs_sb_version_hascrc(&mp->m_sb))
2267 goto recover_immediately;
2268
2269 magic32 = be32_to_cpu(*(__be32 *)blk);
2270 switch (magic32) {
2271 case XFS_ABTB_CRC_MAGIC:
2272 case XFS_ABTC_CRC_MAGIC:
2273 case XFS_ABTB_MAGIC:
2274 case XFS_ABTC_MAGIC:
2275 case XFS_RMAP_CRC_MAGIC:
2276 case XFS_REFC_CRC_MAGIC:
2277 case XFS_IBT_CRC_MAGIC:
2278 case XFS_IBT_MAGIC: {
2279 struct xfs_btree_block *btb = blk;
2280
2281 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2282 uuid = &btb->bb_u.s.bb_uuid;
2283 break;
2284 }
2285 case XFS_BMAP_CRC_MAGIC:
2286 case XFS_BMAP_MAGIC: {
2287 struct xfs_btree_block *btb = blk;
2288
2289 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2290 uuid = &btb->bb_u.l.bb_uuid;
2291 break;
2292 }
2293 case XFS_AGF_MAGIC:
2294 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2295 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2296 break;
2297 case XFS_AGFL_MAGIC:
2298 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2299 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2300 break;
2301 case XFS_AGI_MAGIC:
2302 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2303 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2304 break;
2305 case XFS_SYMLINK_MAGIC:
2306 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2307 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2308 break;
2309 case XFS_DIR3_BLOCK_MAGIC:
2310 case XFS_DIR3_DATA_MAGIC:
2311 case XFS_DIR3_FREE_MAGIC:
2312 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2313 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2314 break;
2315 case XFS_ATTR3_RMT_MAGIC:
2316 /*
2317 * Remote attr blocks are written synchronously, rather than
2318 * being logged. That means they do not contain a valid LSN
2319 * (i.e. transactionally ordered) in them, and hence any time we
2320 * see a buffer to replay over the top of a remote attribute
2321 * block we should simply do so.
2322 */
2323 goto recover_immediately;
2324 case XFS_SB_MAGIC:
2325 /*
2326 * superblock uuids are magic. We may or may not have a
2327 * sb_meta_uuid on disk, but it will be set in the in-core
2328 * superblock. We set the uuid pointer for verification
2329 * according to the superblock feature mask to ensure we check
2330 * the relevant UUID in the superblock.
2331 */
2332 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2333 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2334 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2335 else
2336 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2337 break;
2338 default:
2339 break;
2340 }
2341
2342 if (lsn != (xfs_lsn_t)-1) {
2343 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2344 goto recover_immediately;
2345 return lsn;
2346 }
2347
2348 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2349 switch (magicda) {
2350 case XFS_DIR3_LEAF1_MAGIC:
2351 case XFS_DIR3_LEAFN_MAGIC:
2352 case XFS_DA3_NODE_MAGIC:
2353 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2354 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2355 break;
2356 default:
2357 break;
2358 }
2359
2360 if (lsn != (xfs_lsn_t)-1) {
2361 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2362 goto recover_immediately;
2363 return lsn;
2364 }
2365
2366 /*
2367 * We do individual object checks on dquot and inode buffers as they
2368 * have their own individual LSN records. Also, we could have a stale
2369 * buffer here, so we have to at least recognise these buffer types.
2370 *
2371 * A noted complexity here is inode unlinked list processing - it logs
2372 * the inode directly in the buffer, but we don't know which inodes have
2373 * been modified, and there is no global buffer LSN. Hence we need to
2374 * recover all inode buffer types immediately. This problem will be
2375 * fixed by logical logging of the unlinked list modifications.
2376 */
2377 magic16 = be16_to_cpu(*(__be16 *)blk);
2378 switch (magic16) {
2379 case XFS_DQUOT_MAGIC:
2380 case XFS_DINODE_MAGIC:
2381 goto recover_immediately;
2382 default:
2383 break;
2384 }
2385
2386 /* unknown buffer contents, recover immediately */
2387
2388 recover_immediately:
2389 return (xfs_lsn_t)-1;
2390
2391 }
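/*
 * Illustrative sketch (hypothetical helper) of the decision the caller
 * makes with the LSN returned above, mirroring the check in
 * xlog_recover_buffer_pass2():
 */
static inline bool
xlog_buf_needs_replay(
	xfs_lsn_t	buf_lsn,	/* from xlog_recover_get_buf_lsn() */
	xfs_lsn_t	current_lsn)	/* of the transaction being replayed */
{
	/* Unrecognised or unstamped block: always recover. */
	if (buf_lsn == (xfs_lsn_t)-1 || buf_lsn == 0)
		return true;
	/* Recover only if the on-disk object predates the logged change. */
	return XFS_LSN_CMP(buf_lsn, current_lsn) < 0;
}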
2392
2393 /*
2394 * Validate the recovered buffer is of the correct type and attach the
2395 * appropriate buffer operations to it for writeback. Magic numbers are in a
2396 * few places:
2397 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2398 * the first 32 bits of the buffer (most blocks),
2399 * inside a struct xfs_da_blkinfo at the start of the buffer.
2400 */
2401 static void
2402 xlog_recover_validate_buf_type(
2403 struct xfs_mount *mp,
2404 struct xfs_buf *bp,
2405 xfs_buf_log_format_t *buf_f,
2406 xfs_lsn_t current_lsn)
2407 {
2408 struct xfs_da_blkinfo *info = bp->b_addr;
2409 uint32_t magic32;
2410 uint16_t magic16;
2411 uint16_t magicda;
2412 char *warnmsg = NULL;
2413
2414 /*
2415 * We can only do post recovery validation on items on CRC enabled
2416 * filesystems as we need to know when the buffer was written to be able
2417 * to determine if we should have replayed the item. If we replay old
2418 * metadata over a newer buffer, then it will enter a temporarily
2419 * inconsistent state resulting in verification failures. Hence for now
2420 * just avoid the verification stage for non-CRC filesystems.
2421 */
2422 if (!xfs_sb_version_hascrc(&mp->m_sb))
2423 return;
2424
2425 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2426 magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2427 magicda = be16_to_cpu(info->magic);
2428 switch (xfs_blft_from_flags(buf_f)) {
2429 case XFS_BLFT_BTREE_BUF:
2430 switch (magic32) {
2431 case XFS_ABTB_CRC_MAGIC:
2432 case XFS_ABTC_CRC_MAGIC:
2433 case XFS_ABTB_MAGIC:
2434 case XFS_ABTC_MAGIC:
2435 bp->b_ops = &xfs_allocbt_buf_ops;
2436 break;
2437 case XFS_IBT_CRC_MAGIC:
2438 case XFS_FIBT_CRC_MAGIC:
2439 case XFS_IBT_MAGIC:
2440 case XFS_FIBT_MAGIC:
2441 bp->b_ops = &xfs_inobt_buf_ops;
2442 break;
2443 case XFS_BMAP_CRC_MAGIC:
2444 case XFS_BMAP_MAGIC:
2445 bp->b_ops = &xfs_bmbt_buf_ops;
2446 break;
2447 case XFS_RMAP_CRC_MAGIC:
2448 bp->b_ops = &xfs_rmapbt_buf_ops;
2449 break;
2450 case XFS_REFC_CRC_MAGIC:
2451 bp->b_ops = &xfs_refcountbt_buf_ops;
2452 break;
2453 default:
2454 warnmsg = "Bad btree block magic!";
2455 break;
2456 }
2457 break;
2458 case XFS_BLFT_AGF_BUF:
2459 if (magic32 != XFS_AGF_MAGIC) {
2460 warnmsg = "Bad AGF block magic!";
2461 break;
2462 }
2463 bp->b_ops = &xfs_agf_buf_ops;
2464 break;
2465 case XFS_BLFT_AGFL_BUF:
2466 if (magic32 != XFS_AGFL_MAGIC) {
2467 warnmsg = "Bad AGFL block magic!";
2468 break;
2469 }
2470 bp->b_ops = &xfs_agfl_buf_ops;
2471 break;
2472 case XFS_BLFT_AGI_BUF:
2473 if (magic32 != XFS_AGI_MAGIC) {
2474 warnmsg = "Bad AGI block magic!";
2475 break;
2476 }
2477 bp->b_ops = &xfs_agi_buf_ops;
2478 break;
2479 case XFS_BLFT_UDQUOT_BUF:
2480 case XFS_BLFT_PDQUOT_BUF:
2481 case XFS_BLFT_GDQUOT_BUF:
2482 #ifdef CONFIG_XFS_QUOTA
2483 if (magic16 != XFS_DQUOT_MAGIC) {
2484 warnmsg = "Bad DQUOT block magic!";
2485 break;
2486 }
2487 bp->b_ops = &xfs_dquot_buf_ops;
2488 #else
2489 xfs_alert(mp,
2490 "Trying to recover dquots without QUOTA support built in!");
2491 ASSERT(0);
2492 #endif
2493 break;
2494 case XFS_BLFT_DINO_BUF:
2495 if (magic16 != XFS_DINODE_MAGIC) {
2496 warnmsg = "Bad INODE block magic!";
2497 break;
2498 }
2499 bp->b_ops = &xfs_inode_buf_ops;
2500 break;
2501 case XFS_BLFT_SYMLINK_BUF:
2502 if (magic32 != XFS_SYMLINK_MAGIC) {
2503 warnmsg = "Bad symlink block magic!";
2504 break;
2505 }
2506 bp->b_ops = &xfs_symlink_buf_ops;
2507 break;
2508 case XFS_BLFT_DIR_BLOCK_BUF:
2509 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2510 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2511 warnmsg = "Bad dir block magic!";
2512 break;
2513 }
2514 bp->b_ops = &xfs_dir3_block_buf_ops;
2515 break;
2516 case XFS_BLFT_DIR_DATA_BUF:
2517 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2518 magic32 != XFS_DIR3_DATA_MAGIC) {
2519 warnmsg = "Bad dir data magic!";
2520 break;
2521 }
2522 bp->b_ops = &xfs_dir3_data_buf_ops;
2523 break;
2524 case XFS_BLFT_DIR_FREE_BUF:
2525 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2526 magic32 != XFS_DIR3_FREE_MAGIC) {
2527 warnmsg = "Bad dir3 free magic!";
2528 break;
2529 }
2530 bp->b_ops = &xfs_dir3_free_buf_ops;
2531 break;
2532 case XFS_BLFT_DIR_LEAF1_BUF:
2533 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2534 magicda != XFS_DIR3_LEAF1_MAGIC) {
2535 warnmsg = "Bad dir leaf1 magic!";
2536 break;
2537 }
2538 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2539 break;
2540 case XFS_BLFT_DIR_LEAFN_BUF:
2541 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2542 magicda != XFS_DIR3_LEAFN_MAGIC) {
2543 warnmsg = "Bad dir leafn magic!";
2544 break;
2545 }
2546 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2547 break;
2548 case XFS_BLFT_DA_NODE_BUF:
2549 if (magicda != XFS_DA_NODE_MAGIC &&
2550 magicda != XFS_DA3_NODE_MAGIC) {
2551 warnmsg = "Bad da node magic!";
2552 break;
2553 }
2554 bp->b_ops = &xfs_da3_node_buf_ops;
2555 break;
2556 case XFS_BLFT_ATTR_LEAF_BUF:
2557 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2558 magicda != XFS_ATTR3_LEAF_MAGIC) {
2559 warnmsg = "Bad attr leaf magic!";
2560 break;
2561 }
2562 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2563 break;
2564 case XFS_BLFT_ATTR_RMT_BUF:
2565 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2566 warnmsg = "Bad attr remote magic!";
2567 break;
2568 }
2569 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2570 break;
2571 case XFS_BLFT_SB_BUF:
2572 if (magic32 != XFS_SB_MAGIC) {
2573 warnmsg = "Bad SB block magic!";
2574 break;
2575 }
2576 bp->b_ops = &xfs_sb_buf_ops;
2577 break;
2578 #ifdef CONFIG_XFS_RT
2579 case XFS_BLFT_RTBITMAP_BUF:
2580 case XFS_BLFT_RTSUMMARY_BUF:
2581 /* no magic numbers for verification of RT buffers */
2582 bp->b_ops = &xfs_rtbuf_ops;
2583 break;
2584 #endif /* CONFIG_XFS_RT */
2585 default:
2586 xfs_warn(mp, "Unknown buffer type %d!",
2587 xfs_blft_from_flags(buf_f));
2588 break;
2589 }
2590
2591 /*
2592 * Nothing else to do in the case of a NULL current LSN as this means
2593 * the buffer is more recent than the change in the log and will be
2594 * skipped.
2595 */
2596 if (current_lsn == NULLCOMMITLSN)
2597 return;
2598
2599 if (warnmsg) {
2600 xfs_warn(mp, warnmsg);
2601 ASSERT(0);
2602 }
2603
2604 /*
2605 * We must update the metadata LSN of the buffer as it is written out to
2606 * ensure that older transactions never replay over this one and corrupt
2607 * the buffer. This can occur if log recovery is interrupted at some
2608 * point after the current transaction completes, at which point a
2609 * subsequent mount starts recovery from the beginning.
2610 *
2611 * Write verifiers update the metadata LSN from log items attached to
2612 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2613 * the verifier. We'll clean it up in our ->iodone() callback.
2614 */
2615 if (bp->b_ops) {
2616 struct xfs_buf_log_item *bip;
2617
2618 ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2619 bp->b_iodone = xlog_recover_iodone;
2620 xfs_buf_item_init(bp, mp);
2621 bip = bp->b_log_item;
2622 bip->bli_item.li_lsn = current_lsn;
2623 }
2624 }
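/*
 * Illustrative sketch (hypothetical helper): the three magic number
 * locations listed above, read the same way the function reads them.
 */
static void
xlog_read_buf_magics(
	struct xfs_buf	*bp,
	uint32_t	*magic32,	/* most blocks */
	uint16_t	*magic16,	/* inode and dquot buffers */
	uint16_t	*magicda)	/* dir/attr blocks */
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	*magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	*magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	*magicda = be16_to_cpu(info->magic);
}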
2625
2626 /*
2627 * Perform a 'normal' buffer recovery. Each logged region of the
2628 * buffer should be copied over the corresponding region in the
2629 * given buffer. The bitmap in the buf log format structure indicates
2630 * where to place the logged data.
2631 */
2632 STATIC void
2633 xlog_recover_do_reg_buffer(
2634 struct xfs_mount *mp,
2635 xlog_recover_item_t *item,
2636 struct xfs_buf *bp,
2637 xfs_buf_log_format_t *buf_f,
2638 xfs_lsn_t current_lsn)
2639 {
2640 int i;
2641 int bit;
2642 int nbits;
2643 xfs_failaddr_t fa;
2644
2645 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2646
2647 bit = 0;
2648 i = 1; /* 0 is the buf format structure */
2649 while (1) {
2650 bit = xfs_next_bit(buf_f->blf_data_map,
2651 buf_f->blf_map_size, bit);
2652 if (bit == -1)
2653 break;
2654 nbits = xfs_contig_bits(buf_f->blf_data_map,
2655 buf_f->blf_map_size, bit);
2656 ASSERT(nbits > 0);
2657 ASSERT(item->ri_buf[i].i_addr != NULL);
2658 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2659 ASSERT(BBTOB(bp->b_io_length) >=
2660 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2661
2662 /*
2663 * The dirty regions logged in the buffer, even though
2664 * contiguous, may span multiple chunks. This is because the
2665 * dirty region may span a physical page boundary in a buffer
2666 * and hence be split into two separate vectors for writing into
2667 * the log. Hence we need to trim nbits back to the length of
2668 * the current region being copied out of the log.
2669 */
2670 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2671 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2672
2673 /*
2674 * Do a sanity check if this is a dquot buffer. Just checking
2675 * the first dquot in the buffer should do. XXX This is
2676 * probably a good thing to do for other buf types also.
2677 */
2678 fa = NULL;
2679 if (buf_f->blf_flags &
2680 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2681 if (item->ri_buf[i].i_addr == NULL) {
2682 xfs_alert(mp,
2683 "XFS: NULL dquot in %s.", __func__);
2684 goto next;
2685 }
2686 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2687 xfs_alert(mp,
2688 "XFS: dquot too small (%d) in %s.",
2689 item->ri_buf[i].i_len, __func__);
2690 goto next;
2691 }
2692 fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2693 -1, 0);
2694 if (fa) {
2695 xfs_alert(mp,
2696 "dquot corrupt at %pS trying to replay into block 0x%llx",
2697 fa, bp->b_bn);
2698 goto next;
2699 }
2700 }
2701
2702 memcpy(xfs_buf_offset(bp,
2703 (uint)bit << XFS_BLF_SHIFT), /* dest */
2704 item->ri_buf[i].i_addr, /* source */
2705 nbits << XFS_BLF_SHIFT); /* length */
2706 next:
2707 i++;
2708 bit += nbits;
2709 }
2710
2711 /* Shouldn't be any more regions */
2712 ASSERT(i == item->ri_total);
2713
2714 xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2715 }
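/*
 * Worked example for the copy loop above: a data map of 0b0110 yields
 * bit = 1, nbits = 2, so buffer bytes [1 << XFS_BLF_SHIFT,
 * 3 << XFS_BLF_SHIFT) are overwritten from item->ri_buf[1] (slot 0 holds
 * the format structure). Had that dirty region been split across two log
 * vectors, nbits would be trimmed to the first vector's length and the
 * next iteration would resume the region from ri_buf[2].
 */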
2716
2717 /*
2718 * Perform a dquot buffer recovery.
2719 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2720 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2721 * Else, treat it as a regular buffer and do recovery.
2722 *
2723 * Return false if the buffer was tossed and true if we recovered the buffer to
2724 * indicate to the caller if the buffer needs writing.
2725 */
2726 STATIC bool
2727 xlog_recover_do_dquot_buffer(
2728 struct xfs_mount *mp,
2729 struct xlog *log,
2730 struct xlog_recover_item *item,
2731 struct xfs_buf *bp,
2732 struct xfs_buf_log_format *buf_f)
2733 {
2734 uint type;
2735
2736 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2737
2738 /*
2739 * Filesystems are required to send in quota flags at mount time.
2740 */
2741 if (!mp->m_qflags)
2742 return false;
2743
2744 type = 0;
2745 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2746 type |= XFS_DQ_USER;
2747 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2748 type |= XFS_DQ_PROJ;
2749 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2750 type |= XFS_DQ_GROUP;
2751 /*
2752 * This type of quota was turned off, so ignore this buffer.
2753 */
2754 if (log->l_quotaoffs_flag & type)
2755 return false;
2756
2757 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2758 return true;
2759 }
2760
2761 /*
2762 * This routine replays a modification made to a buffer at runtime.
2763 * There are actually two types of buffer, regular and inode, which
2764 * are handled differently. Inode buffers are handled differently
2765 * in that we only recover a specific set of data from them, namely
2766 * the inode di_next_unlinked fields. This is because all other inode
2767 * data is actually logged via inode records and any data we replay
2768 * here which overlaps that may be stale.
2769 *
2770 * When meta-data buffers are freed at run time we log a buffer item
2771 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2772 * of the buffer in the log should not be replayed at recovery time.
2773 * This is so that if the blocks covered by the buffer are reused for
2774 * file data before we crash we don't end up replaying old, freed
2775 * meta-data into a user's file.
2776 *
2777 * To handle the cancellation of buffer log items, we make two passes
2778 * over the log during recovery. During the first we build a table of
2779 * those buffers which have been cancelled, and during the second we
2780 * only replay those buffers which do not have corresponding cancel
2781 * records in the table. See xlog_recover_buffer_pass[1,2] above
2782 * for more details on the implementation of the table of cancel records.
2783 */
2784 STATIC int
2785 xlog_recover_buffer_pass2(
2786 struct xlog *log,
2787 struct list_head *buffer_list,
2788 struct xlog_recover_item *item,
2789 xfs_lsn_t current_lsn)
2790 {
2791 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2792 xfs_mount_t *mp = log->l_mp;
2793 xfs_buf_t *bp;
2794 int error;
2795 uint buf_flags;
2796 xfs_lsn_t lsn;
2797
2798 /*
2799 * In this pass we only want to recover all the buffers which have
2800 * not been cancelled and are not cancellation buffers themselves.
2801 */
2802 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2803 buf_f->blf_len, buf_f->blf_flags)) {
2804 trace_xfs_log_recover_buf_cancel(log, buf_f);
2805 return 0;
2806 }
2807
2808 trace_xfs_log_recover_buf_recover(log, buf_f);
2809
2810 buf_flags = 0;
2811 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2812 buf_flags |= XBF_UNMAPPED;
2813
2814 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2815 buf_flags, NULL);
2816 if (!bp)
2817 return -ENOMEM;
2818 error = bp->b_error;
2819 if (error) {
2820 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2821 goto out_release;
2822 }
2823
2824 /*
2825 * Recover the buffer only if we get an LSN from it and it's less than
2826 * the lsn of the transaction we are replaying.
2827 *
2828 * Note that we have to be extremely careful of readahead here.
2829 * Readahead does not attach verifiers to the buffers, so if we don't
2830 * actually do any replay after readahead because the LSN found in the
2831 * buffer is more recent than the current transaction, then we need to
2832 * attach the verifier directly. Failure to do so means future
2833 * recovery actions (e.g. EFI and unlinked list recovery) can operate
2834 * on the buffers without the verifier attached, which can leave
2835 * blocks on disk with the correct content but a stale
2836 * CRC.
2837 *
2838 * It is safe to assume these clean buffers are currently up to date.
2839 * If the buffer is dirtied by a later transaction being replayed, then
2840 * the verifier will be reset to match whatever recover turns that
2841 * buffer into.
2842 */
2843 lsn = xlog_recover_get_buf_lsn(mp, bp);
2844 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2845 trace_xfs_log_recover_buf_skip(log, buf_f);
2846 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2847 goto out_release;
2848 }
2849
2850 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2851 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2852 if (error)
2853 goto out_release;
2854 } else if (buf_f->blf_flags &
2855 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2856 bool dirty;
2857
2858 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2859 if (!dirty)
2860 goto out_release;
2861 } else {
2862 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2863 }
2864
2865 /*
2866 * Perform delayed write on the buffer. Asynchronous writes will be
2867 * slower when taking into account all the buffers to be flushed.
2868 *
2869 * Also make sure that only inode buffers with good sizes stay in
2870 * the buffer cache. The kernel moves inodes in buffers of 1 block
2871 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode
2872 * buffers in the log can be a different size if the log was generated
2873 * by an older kernel using unclustered inode buffers or a newer kernel
2874 * running with a different inode cluster size. Regardless, if
2875 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2876 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2877 * the buffer out of the buffer cache so that the buffer won't
2878 * overlap with future reads of those inodes.
2879 */
2880 if (XFS_DINODE_MAGIC ==
2881 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2882 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2883 (uint32_t)log->l_mp->m_inode_cluster_size))) {
2884 xfs_buf_stale(bp);
2885 error = xfs_bwrite(bp);
2886 } else {
2887 ASSERT(bp->b_target->bt_mount == mp);
2888 bp->b_iodone = xlog_recover_iodone;
2889 xfs_buf_delwri_queue(bp, buffer_list);
2890 }
2891
2892 out_release:
2893 xfs_buf_relse(bp);
2894 return error;
2895 }
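/*
 * Worked example of the inode buffer size check above: with a 4k block
 * size and an 8k inode cluster, MAX() is 8k. A 4k inode buffer replayed
 * from a log written by an older, unclustered kernel is written straight
 * out with xfs_bwrite() and marked stale, so it cannot linger in the
 * cache and overlap a later 8k read of the same inodes.
 */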
2896
2897 /*
2898 * Inode fork owner changes
2899 *
2900 * If we have been told that we have to reparent the inode fork, it's because an
2901 * extent swap operation on a CRC enabled filesystem has been done and we are
2902 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2903 * owners of it.
2904 *
2905 * The complexity here is that we don't have an inode context to work with, so
2906 * after we've replayed the inode we need to instantiate one. This is where the
2907 * fun begins.
2908 *
2909 * We are in the middle of log recovery, so we can't run transactions. That
2910 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2911 * that will result in the corresponding iput() running the inode through
2912 * xfs_inactive(). If we've just replayed an inode core that changes the link
2913 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2914 * transactions (bad!).
2915 *
2916 * So, to avoid this, we instantiate an inode directly from the inode core we've
2917 * just recovered. We have the buffer still locked, and all we really need to
2918 * instantiate is the inode core and the forks being modified. We can do this
2919 * manually, then run the inode btree owner change, and then tear down the
2920 * xfs_inode without having to run any transactions at all.
2921 *
2922 * Also, because we don't have a transaction context available here but need
2923 * to gather all the buffers we modify for writeback, we pass the buffer_list
2924 * to the operation instead.
2925 */
2926
2927 STATIC int
2928 xfs_recover_inode_owner_change(
2929 struct xfs_mount *mp,
2930 struct xfs_dinode *dip,
2931 struct xfs_inode_log_format *in_f,
2932 struct list_head *buffer_list)
2933 {
2934 struct xfs_inode *ip;
2935 int error;
2936
2937 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2938
2939 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2940 if (!ip)
2941 return -ENOMEM;
2942
2943 /* instantiate the inode */
2944 xfs_inode_from_disk(ip, dip);
2945 ASSERT(ip->i_d.di_version >= 3);
2946
2947 error = xfs_iformat_fork(ip, dip);
2948 if (error)
2949 goto out_free_ip;
2950
2951 if (!xfs_inode_verify_forks(ip)) {
2952 error = -EFSCORRUPTED;
2953 goto out_free_ip;
2954 }
2955
2956 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2957 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2958 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2959 ip->i_ino, buffer_list);
2960 if (error)
2961 goto out_free_ip;
2962 }
2963
2964 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2965 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2966 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2967 ip->i_ino, buffer_list);
2968 if (error)
2969 goto out_free_ip;
2970 }
2971
2972 out_free_ip:
2973 xfs_inode_free(ip);
2974 return error;
2975 }
2976
2977 STATIC int
2978 xlog_recover_inode_pass2(
2979 struct xlog *log,
2980 struct list_head *buffer_list,
2981 struct xlog_recover_item *item,
2982 xfs_lsn_t current_lsn)
2983 {
2984 struct xfs_inode_log_format *in_f;
2985 xfs_mount_t *mp = log->l_mp;
2986 xfs_buf_t *bp;
2987 xfs_dinode_t *dip;
2988 int len;
2989 char *src;
2990 char *dest;
2991 int error;
2992 int attr_index;
2993 uint fields;
2994 struct xfs_log_dinode *ldip;
2995 uint isize;
2996 int need_free = 0;
2997
2998 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
2999 in_f = item->ri_buf[0].i_addr;
3000 } else {
3001 in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
3002 need_free = 1;
3003 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
3004 if (error)
3005 goto error;
3006 }
3007
3008 /*
3009 * Inode buffers can be freed; look out for that case
3010 * and do not replay the inode.
3011 */
3012 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
3013 in_f->ilf_len, 0)) {
3014 error = 0;
3015 trace_xfs_log_recover_inode_cancel(log, in_f);
3016 goto error;
3017 }
3018 trace_xfs_log_recover_inode_recover(log, in_f);
3019
3020 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
3021 &xfs_inode_buf_ops);
3022 if (!bp) {
3023 error = -ENOMEM;
3024 goto error;
3025 }
3026 error = bp->b_error;
3027 if (error) {
3028 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
3029 goto out_release;
3030 }
3031 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
3032 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
3033
3034 /*
3035 * Make sure the place we're flushing out to really looks
3036 * like an inode!
3037 */
3038 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
3039 xfs_alert(mp,
3040 "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
3041 __func__, dip, bp, in_f->ilf_ino);
3042 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
3043 XFS_ERRLEVEL_LOW, mp);
3044 error = -EFSCORRUPTED;
3045 goto out_release;
3046 }
3047 ldip = item->ri_buf[1].i_addr;
3048 if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
3049 xfs_alert(mp,
3050 "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
3051 __func__, item, in_f->ilf_ino);
3052 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
3053 XFS_ERRLEVEL_LOW, mp);
3054 error = -EFSCORRUPTED;
3055 goto out_release;
3056 }
3057
3058 /*
3059 * If the inode has an LSN in it, recover the inode only if it's less
3060 * than the lsn of the transaction we are replaying. Note: we still
3061 * need to replay an owner change even though the inode is more recent
3062 * than the transaction as there is no guarantee that all the btree
3063 * blocks are more recent than this transaction, too.
3064 */
3065 if (dip->di_version >= 3) {
3066 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
3067
3068 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3069 trace_xfs_log_recover_inode_skip(log, in_f);
3070 error = 0;
3071 goto out_owner_change;
3072 }
3073 }
3074
3075 /*
3076 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3077 * are transactional and if ordering is necessary we can determine that
3078 * more accurately by the LSN field in the V3 inode core. Don't trust
3079 * the inode versions, as we might be changing them here; use the
3080 * superblock flag to determine whether we need to look at di_flushiter
3081 * to skip replay when the on-disk inode is newer than the one in the log.
3082 */
3083 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3084 ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3085 /*
3086 * Deal with the wrap case, DI_MAX_FLUSH is less
3087 * than smaller numbers
3088 */
3089 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3090 ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3091 /* do nothing */
3092 } else {
3093 trace_xfs_log_recover_inode_skip(log, in_f);
3094 error = 0;
3095 goto out_release;
3096 }
3097 }
3098
3099 /* Take the opportunity to reset the flush iteration count */
3100 ldip->di_flushiter = 0;
3101
3102 if (unlikely(S_ISREG(ldip->di_mode))) {
3103 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3104 (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3105 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3106 XFS_ERRLEVEL_LOW, mp, ldip,
3107 sizeof(*ldip));
3108 xfs_alert(mp,
3109 "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3110 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3111 __func__, item, dip, bp, in_f->ilf_ino);
3112 error = -EFSCORRUPTED;
3113 goto out_release;
3114 }
3115 } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3116 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3117 (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3118 (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3119 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3120 XFS_ERRLEVEL_LOW, mp, ldip,
3121 sizeof(*ldip));
3122 xfs_alert(mp,
3123 "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3124 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3125 __func__, item, dip, bp, in_f->ilf_ino);
3126 error = -EFSCORRUPTED;
3127 goto out_release;
3128 }
3129 }
3130 if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)) {
3131 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3132 XFS_ERRLEVEL_LOW, mp, ldip,
3133 sizeof(*ldip));
3134 xfs_alert(mp,
3135 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3136 "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3137 __func__, item, dip, bp, in_f->ilf_ino,
3138 ldip->di_nextents + ldip->di_anextents,
3139 ldip->di_nblocks);
3140 error = -EFSCORRUPTED;
3141 goto out_release;
3142 }
3143 if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3144 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3145 XFS_ERRLEVEL_LOW, mp, ldip,
3146 sizeof(*ldip));
3147 xfs_alert(mp,
3148 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3149 "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3150 item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3151 error = -EFSCORRUPTED;
3152 goto out_release;
3153 }
3154 isize = xfs_log_dinode_size(ldip->di_version);
3155 if (unlikely(item->ri_buf[1].i_len > isize)) {
3156 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3157 XFS_ERRLEVEL_LOW, mp, ldip,
3158 sizeof(*ldip));
3159 xfs_alert(mp,
3160 "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3161 __func__, item->ri_buf[1].i_len, item);
3162 error = -EFSCORRUPTED;
3163 goto out_release;
3164 }
3165
3166 /* recover the log dinode inode into the on disk inode */
3167 xfs_log_dinode_to_disk(ldip, dip);
3168
3169 fields = in_f->ilf_fields;
3170 if (fields & XFS_ILOG_DEV)
3171 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3172
3173 if (in_f->ilf_size == 2)
3174 goto out_owner_change;
3175 len = item->ri_buf[2].i_len;
3176 src = item->ri_buf[2].i_addr;
3177 ASSERT(in_f->ilf_size <= 4);
3178 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3179 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3180 (len == in_f->ilf_dsize));
3181
3182 switch (fields & XFS_ILOG_DFORK) {
3183 case XFS_ILOG_DDATA:
3184 case XFS_ILOG_DEXT:
3185 memcpy(XFS_DFORK_DPTR(dip), src, len);
3186 break;
3187
3188 case XFS_ILOG_DBROOT:
3189 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3190 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3191 XFS_DFORK_DSIZE(dip, mp));
3192 break;
3193
3194 default:
3195 /*
3196 * There are no data fork flags set.
3197 */
3198 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3199 break;
3200 }
3201
3202 /*
3203 * If we logged any attribute data, recover it. There may or
3204 * may not have been any other non-core data logged in this
3205 * transaction.
3206 */
3207 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3208 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3209 attr_index = 3;
3210 } else {
3211 attr_index = 2;
3212 }
3213 len = item->ri_buf[attr_index].i_len;
3214 src = item->ri_buf[attr_index].i_addr;
3215 ASSERT(len == in_f->ilf_asize);
3216
3217 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3218 case XFS_ILOG_ADATA:
3219 case XFS_ILOG_AEXT:
3220 dest = XFS_DFORK_APTR(dip);
3221 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3222 memcpy(dest, src, len);
3223 break;
3224
3225 case XFS_ILOG_ABROOT:
3226 dest = XFS_DFORK_APTR(dip);
3227 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3228 len, (xfs_bmdr_block_t *)dest,
3229 XFS_DFORK_ASIZE(dip, mp));
3230 break;
3231
3232 default:
3233 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3234 ASSERT(0);
3235 error = -EIO;
3236 goto out_release;
3237 }
3238 }
3239
3240 out_owner_change:
3241 /* Recover the swapext owner change unless inode has been deleted */
3242 if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3243 (dip->di_mode != 0))
3244 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3245 buffer_list);
3246 /* re-generate the checksum. */
3247 xfs_dinode_calc_crc(log->l_mp, dip);
3248
3249 ASSERT(bp->b_target->bt_mount == mp);
3250 bp->b_iodone = xlog_recover_iodone;
3251 xfs_buf_delwri_queue(bp, buffer_list);
3252
3253 out_release:
3254 xfs_buf_relse(bp);
3255 error:
3256 if (need_free)
3257 kmem_free(in_f);
3258 return error;
3259 }
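/*
 * Worked example of the ri_buf layout consumed above: a transaction that
 * logged core, data fork and attr fork has ilf_size == 4:
 *
 *	ri_buf[0]	xfs_inode_log_format
 *	ri_buf[1]	log dinode core
 *	ri_buf[2]	data fork (an XFS_ILOG_DFORK flavour)
 *	ri_buf[3]	attr fork (an XFS_ILOG_AFORK flavour)
 *
 * With no data fork logged the attr fork slides down to ri_buf[2], which
 * is why attr_index is chosen as 3 or 2 above.
 */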
3260
3261 /*
3262 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3263 * structure, so that we know not to do any dquot item or dquot buffer recovery
3264 * of that type.
3265 */
3266 STATIC int
3267 xlog_recover_quotaoff_pass1(
3268 struct xlog *log,
3269 struct xlog_recover_item *item)
3270 {
3271 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3272 ASSERT(qoff_f);
3273
3274 /*
3275 * The logitem format's flag tells us if this was user quotaoff,
3276 * group/project quotaoff or both.
3277 */
3278 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3279 log->l_quotaoffs_flag |= XFS_DQ_USER;
3280 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3281 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3282 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3283 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3284
3285 return 0;
3286 }
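/*
 * Example: if pass 1 saw a user quotaoff, XFS_DQ_USER is now set in
 * l_quotaoffs_flag, so xlog_recover_dquot_pass2() below drops user dquot
 * records while still replaying group and project ones.
 */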
3287
3288 /*
3289 * Recover a dquot record
3290 */
3291 STATIC int
3292 xlog_recover_dquot_pass2(
3293 struct xlog *log,
3294 struct list_head *buffer_list,
3295 struct xlog_recover_item *item,
3296 xfs_lsn_t current_lsn)
3297 {
3298 xfs_mount_t *mp = log->l_mp;
3299 xfs_buf_t *bp;
3300 struct xfs_disk_dquot *ddq, *recddq;
3301 xfs_failaddr_t fa;
3302 int error;
3303 xfs_dq_logformat_t *dq_f;
3304 uint type;
3305
3306
3307 /*
3308 * Filesystems are required to send in quota flags at mount time.
3309 */
3310 if (mp->m_qflags == 0)
3311 return 0;
3312
3313 recddq = item->ri_buf[1].i_addr;
3314 if (recddq == NULL) {
3315 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3316 return -EIO;
3317 }
3318 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3319 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3320 item->ri_buf[1].i_len, __func__);
3321 return -EIO;
3322 }
3323
3324 /*
3325 * This type of quota was turned off, so ignore this record.
3326 */
3327 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3328 ASSERT(type);
3329 if (log->l_quotaoffs_flag & type)
3330 return 0;
3331
3332 /*
3333 * At this point we know that quota was _not_ turned off.
3334 * Since the mount flags are not indicating to us otherwise, this
3335 * must mean that quota is on, and the dquot needs to be replayed.
3336 * Remember that we may not have fully recovered the superblock yet,
3337 * so we can't do the usual trick of looking at the SB quota bits.
3338 *
3339 * The other possibility, of course, is that the quota subsystem was
3340 * removed since the last mount - ENOSYS.
3341 */
3342 dq_f = item->ri_buf[0].i_addr;
3343 ASSERT(dq_f);
3344 fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
3345 if (fa) {
3346 xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3347 dq_f->qlf_id, fa);
3348 return -EIO;
3349 }
3350 ASSERT(dq_f->qlf_len == 1);
3351
3352 /*
3353 * At this point we are assuming that the dquots have been allocated
3354 * and hence the buffer has valid dquots stamped in it. It should,
3355 * therefore, pass verifier validation. If the dquot is bad, then
3356 * we'll return an error here, so we don't need to specifically check
3357 * the dquot in the buffer after the verifier has run.
3358 */
3359 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3360 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3361 &xfs_dquot_buf_ops);
3362 if (error)
3363 return error;
3364
3365 ASSERT(bp);
3366 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3367
3368 /*
3369 * If the dquot has an LSN in it, recover the dquot only if it's less
3370 * than the lsn of the transaction we are replaying.
3371 */
3372 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3373 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3374 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3375
3376 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3377 goto out_release;
3378 }
3379 }
3380
3381 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3382 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3383 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3384 XFS_DQUOT_CRC_OFF);
3385 }
3386
3387 ASSERT(dq_f->qlf_size == 2);
3388 ASSERT(bp->b_target->bt_mount == mp);
3389 bp->b_iodone = xlog_recover_iodone;
3390 xfs_buf_delwri_queue(bp, buffer_list);
3391
3392 out_release:
3393 xfs_buf_relse(bp);
3394 return 0;
3395 }
3396
3397 /*
3398 * This routine is called to create an in-core extent free intent
3399 * item from the efi format structure which was logged on disk.
3400 * It allocates an in-core efi, copies the extents from the format
3401 * structure into it, and adds the efi to the AIL with the given
3402 * LSN.
3403 */
3404 STATIC int
3405 xlog_recover_efi_pass2(
3406 struct xlog *log,
3407 struct xlog_recover_item *item,
3408 xfs_lsn_t lsn)
3409 {
3410 int error;
3411 struct xfs_mount *mp = log->l_mp;
3412 struct xfs_efi_log_item *efip;
3413 struct xfs_efi_log_format *efi_formatp;
3414
3415 efi_formatp = item->ri_buf[0].i_addr;
3416
3417 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3418 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3419 if (error) {
3420 xfs_efi_item_free(efip);
3421 return error;
3422 }
3423 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3424
3425 spin_lock(&log->l_ailp->ail_lock);
3426 /*
3427 * The EFI has two references. One for the EFD and one for EFI to ensure
3428 * it makes it into the AIL. Insert the EFI into the AIL directly and
3429 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3430 * AIL lock.
3431 */
3432 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3433 xfs_efi_release(efip);
3434 return 0;
3435 }
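/*
 * Reference lifecycle: xfs_efi_init() hands back an EFI holding two
 * references. The xfs_efi_release() above drops the EFI's own reference
 * once the item is safely in the AIL; the remaining (EFD) reference is
 * dropped by xlog_recover_efd_pass2() below when a matching EFD is
 * found, or later when recovery processes intents that never got an EFD.
 */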
3436
3437
3438 /*
3439 * This routine is called when an EFD format structure is found in a committed
3440 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3441 * was still in the log. To do this it searches the AIL for the EFI with an id
3442 * equal to that in the EFD format structure. If we find it we drop the EFD
3443 * reference, which removes the EFI from the AIL and frees it.
3444 */
3445 STATIC int
3446 xlog_recover_efd_pass2(
3447 struct xlog *log,
3448 struct xlog_recover_item *item)
3449 {
3450 xfs_efd_log_format_t *efd_formatp;
3451 xfs_efi_log_item_t *efip = NULL;
3452 xfs_log_item_t *lip;
3453 uint64_t efi_id;
3454 struct xfs_ail_cursor cur;
3455 struct xfs_ail *ailp = log->l_ailp;
3456
3457 efd_formatp = item->ri_buf[0].i_addr;
3458 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3459 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3460 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3461 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3462 efi_id = efd_formatp->efd_efi_id;
3463
3464 /*
3465 * Search for the EFI with the id in the EFD format structure in the
3466 * AIL.
3467 */
3468 spin_lock(&ailp->ail_lock);
3469 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3470 while (lip != NULL) {
3471 if (lip->li_type == XFS_LI_EFI) {
3472 efip = (xfs_efi_log_item_t *)lip;
3473 if (efip->efi_format.efi_id == efi_id) {
3474 /*
3475 * Drop the EFD reference to the EFI. This
3476 * removes the EFI from the AIL and frees it.
3477 */
3478 spin_unlock(&ailp->ail_lock);
3479 xfs_efi_release(efip);
3480 spin_lock(&ailp->ail_lock);
3481 break;
3482 }
3483 }
3484 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3485 }
3486
3487 xfs_trans_ail_cursor_done(&cur);
3488 spin_unlock(&ailp->ail_lock);
3489
3490 return 0;
3491 }
3492
3493 /*
3494 * This routine is called to create an in-core extent rmap update
3495 * item from the rui format structure which was logged on disk.
3496 * It allocates an in-core rui, copies the extents from the format
3497 * structure into it, and adds the rui to the AIL with the given
3498 * LSN.
3499 */
3500 STATIC int
3501 xlog_recover_rui_pass2(
3502 struct xlog *log,
3503 struct xlog_recover_item *item,
3504 xfs_lsn_t lsn)
3505 {
3506 int error;
3507 struct xfs_mount *mp = log->l_mp;
3508 struct xfs_rui_log_item *ruip;
3509 struct xfs_rui_log_format *rui_formatp;
3510
3511 rui_formatp = item->ri_buf[0].i_addr;
3512
3513 ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3514 error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3515 if (error) {
3516 xfs_rui_item_free(ruip);
3517 return error;
3518 }
3519 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3520
3521 spin_lock(&log->l_ailp->ail_lock);
3522 /*
3523 * The RUI has two references. One for the RUD and one for RUI to ensure
3524 * it makes it into the AIL. Insert the RUI into the AIL directly and
3525 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3526 * AIL lock.
3527 */
3528 xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3529 xfs_rui_release(ruip);
3530 return 0;
3531 }
3532
3533
3534 /*
3535 * This routine is called when an RUD format structure is found in a committed
3536 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3537 * was still in the log. To do this it searches the AIL for the RUI with an id
3538 * equal to that in the RUD format structure. If we find it we drop the RUD
3539 * reference, which removes the RUI from the AIL and frees it.
3540 */
3541 STATIC int
3542 xlog_recover_rud_pass2(
3543 struct xlog *log,
3544 struct xlog_recover_item *item)
3545 {
3546 struct xfs_rud_log_format *rud_formatp;
3547 struct xfs_rui_log_item *ruip = NULL;
3548 struct xfs_log_item *lip;
3549 uint64_t rui_id;
3550 struct xfs_ail_cursor cur;
3551 struct xfs_ail *ailp = log->l_ailp;
3552
3553 rud_formatp = item->ri_buf[0].i_addr;
3554 ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3555 rui_id = rud_formatp->rud_rui_id;
3556
3557 /*
3558 * Search for the RUI with the id in the RUD format structure in the
3559 * AIL.
3560 */
3561 spin_lock(&ailp->ail_lock);
3562 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3563 while (lip != NULL) {
3564 if (lip->li_type == XFS_LI_RUI) {
3565 ruip = (struct xfs_rui_log_item *)lip;
3566 if (ruip->rui_format.rui_id == rui_id) {
3567 /*
3568 * Drop the RUD reference to the RUI. This
3569 * removes the RUI from the AIL and frees it.
3570 */
3571 spin_unlock(&ailp->ail_lock);
3572 xfs_rui_release(ruip);
3573 spin_lock(&ailp->ail_lock);
3574 break;
3575 }
3576 }
3577 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3578 }
3579
3580 xfs_trans_ail_cursor_done(&cur);
3581 spin_unlock(&ailp->ail_lock);
3582
3583 return 0;
3584 }
3585
3586 /*
3587 * Copy a CUI format buffer from the given buf into the destination
3588 * CUI format structure. The CUI/CUD items were designed not to need any
3589 * special alignment handling.
3590 */
3591 static int
3592 xfs_cui_copy_format(
3593 struct xfs_log_iovec *buf,
3594 struct xfs_cui_log_format *dst_cui_fmt)
3595 {
3596 struct xfs_cui_log_format *src_cui_fmt;
3597 uint len;
3598
3599 src_cui_fmt = buf->i_addr;
3600 len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3601
3602 if (buf->i_len == len) {
3603 memcpy(dst_cui_fmt, src_cui_fmt, len);
3604 return 0;
3605 }
3606 return -EFSCORRUPTED;
3607 }
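
/*
 * A stand-alone sketch of the exact-length defensive copy used by
 * xfs_cui_copy_format() above (and by xfs_bui_copy_format() below),
 * assuming memcpy() from <string.h> when compiled stand-alone;
 * sketch_copy_exact is a hypothetical name. Anything but an exact size
 * match is rejected as corruption.
 */
static int sketch_copy_exact(void *dst, const void *src,
			     unsigned int src_len, unsigned int expected_len)
{
	if (src_len != expected_len)
		return -1;	/* size mismatch: treat as corruption */
	memcpy(dst, src, expected_len);
	return 0;
}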
3608
3609 /*
3610 * This routine is called to create an in-core extent refcount update
3611 * item from the cui format structure which was logged on disk.
3612 * It allocates an in-core cui, copies the extents from the format
3613 * structure into it, and adds the cui to the AIL with the given
3614 * LSN.
3615 */
3616 STATIC int
3617 xlog_recover_cui_pass2(
3618 struct xlog *log,
3619 struct xlog_recover_item *item,
3620 xfs_lsn_t lsn)
3621 {
3622 int error;
3623 struct xfs_mount *mp = log->l_mp;
3624 struct xfs_cui_log_item *cuip;
3625 struct xfs_cui_log_format *cui_formatp;
3626
3627 cui_formatp = item->ri_buf[0].i_addr;
3628
3629 cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3630 error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3631 if (error) {
3632 xfs_cui_item_free(cuip);
3633 return error;
3634 }
3635 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3636
3637 spin_lock(&log->l_ailp->ail_lock);
3638 /*
3639 * The CUI has two references. One for the CUD and one for the CUI to ensure
3640 * it makes it into the AIL. Insert the CUI into the AIL directly and
3641 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3642 * AIL lock.
3643 */
3644 xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3645 xfs_cui_release(cuip);
3646 return 0;
3647 }
3648
3649
3650 /*
3651 * This routine is called when a CUD format structure is found in a committed
3652 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3653 * was still in the log. To do this it searches the AIL for the CUI with an id
3654 * equal to that in the CUD format structure. If we find it we drop the CUD
3655 * reference, which removes the CUI from the AIL and frees it.
3656 */
3657 STATIC int
3658 xlog_recover_cud_pass2(
3659 struct xlog *log,
3660 struct xlog_recover_item *item)
3661 {
3662 struct xfs_cud_log_format *cud_formatp;
3663 struct xfs_cui_log_item *cuip = NULL;
3664 struct xfs_log_item *lip;
3665 uint64_t cui_id;
3666 struct xfs_ail_cursor cur;
3667 struct xfs_ail *ailp = log->l_ailp;
3668
3669 cud_formatp = item->ri_buf[0].i_addr;
3670 if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3671 return -EFSCORRUPTED;
3672 cui_id = cud_formatp->cud_cui_id;
3673
3674 /*
3675 * Search for the CUI with the id in the CUD format structure in the
3676 * AIL.
3677 */
3678 spin_lock(&ailp->ail_lock);
3679 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3680 while (lip != NULL) {
3681 if (lip->li_type == XFS_LI_CUI) {
3682 cuip = (struct xfs_cui_log_item *)lip;
3683 if (cuip->cui_format.cui_id == cui_id) {
3684 /*
3685 * Drop the CUD reference to the CUI. This
3686 * removes the CUI from the AIL and frees it.
3687 */
3688 spin_unlock(&ailp->ail_lock);
3689 xfs_cui_release(cuip);
3690 spin_lock(&ailp->ail_lock);
3691 break;
3692 }
3693 }
3694 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3695 }
3696
3697 xfs_trans_ail_cursor_done(&cur);
3698 spin_unlock(&ailp->ail_lock);
3699
3700 return 0;
3701 }
3702
3703 /*
3704 * Copy a BUI format buffer from the given buf into the destination
3705 * BUI format structure. The BUI/BUD items were designed not to need any
3706 * special alignment handling.
3707 */
3708 static int
3709 xfs_bui_copy_format(
3710 struct xfs_log_iovec *buf,
3711 struct xfs_bui_log_format *dst_bui_fmt)
3712 {
3713 struct xfs_bui_log_format *src_bui_fmt;
3714 uint len;
3715
3716 src_bui_fmt = buf->i_addr;
3717 len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3718
3719 if (buf->i_len == len) {
3720 memcpy(dst_bui_fmt, src_bui_fmt, len);
3721 return 0;
3722 }
3723 return -EFSCORRUPTED;
3724 }
3725
3726 /*
3727 * This routine is called to create an in-core extent bmap update
3728 * item from the bui format structure which was logged on disk.
3729 * It allocates an in-core bui, copies the extents from the format
3730 * structure into it, and adds the bui to the AIL with the given
3731 * LSN.
3732 */
3733 STATIC int
3734 xlog_recover_bui_pass2(
3735 struct xlog *log,
3736 struct xlog_recover_item *item,
3737 xfs_lsn_t lsn)
3738 {
3739 int error;
3740 struct xfs_mount *mp = log->l_mp;
3741 struct xfs_bui_log_item *buip;
3742 struct xfs_bui_log_format *bui_formatp;
3743
3744 bui_formatp = item->ri_buf[0].i_addr;
3745
3746 if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3747 return -EFSCORRUPTED;
3748 buip = xfs_bui_init(mp);
3749 error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3750 if (error) {
3751 xfs_bui_item_free(buip);
3752 return error;
3753 }
3754 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3755
3756 spin_lock(&log->l_ailp->ail_lock);
3757 /*
3758 * The BUI has two references. One for the BUD and one for the BUI to
3759 * ensure it makes it into the AIL. Insert the BUI into the AIL directly
3760 * and drop the BUI reference. Note that xfs_trans_ail_update() drops the
3761 * AIL lock.
3762 */
3763 xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3764 xfs_bui_release(buip);
3765 return 0;
3766 }
3767
3768
3769 /*
3770 * This routine is called when a BUD format structure is found in a committed
3771 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3772 * was still in the log. To do this it searches the AIL for the BUI with an id
3773 * equal to that in the BUD format structure. If we find it we drop the BUD
3774 * reference, which removes the BUI from the AIL and frees it.
3775 */
3776 STATIC int
3777 xlog_recover_bud_pass2(
3778 struct xlog *log,
3779 struct xlog_recover_item *item)
3780 {
3781 struct xfs_bud_log_format *bud_formatp;
3782 struct xfs_bui_log_item *buip = NULL;
3783 struct xfs_log_item *lip;
3784 uint64_t bui_id;
3785 struct xfs_ail_cursor cur;
3786 struct xfs_ail *ailp = log->l_ailp;
3787
3788 bud_formatp = item->ri_buf[0].i_addr;
3789 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3790 return -EFSCORRUPTED;
3791 bui_id = bud_formatp->bud_bui_id;
3792
3793 /*
3794 * Search for the BUI with the id in the BUD format structure in the
3795 * AIL.
3796 */
3797 spin_lock(&ailp->ail_lock);
3798 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3799 while (lip != NULL) {
3800 if (lip->li_type == XFS_LI_BUI) {
3801 buip = (struct xfs_bui_log_item *)lip;
3802 if (buip->bui_format.bui_id == bui_id) {
3803 /*
3804 * Drop the BUD reference to the BUI. This
3805 * removes the BUI from the AIL and frees it.
3806 */
3807 spin_unlock(&ailp->ail_lock);
3808 xfs_bui_release(buip);
3809 spin_lock(&ailp->ail_lock);
3810 break;
3811 }
3812 }
3813 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3814 }
3815
3816 xfs_trans_ail_cursor_done(&cur);
3817 spin_unlock(&ailp->ail_lock);
3818
3819 return 0;
3820 }
3821
3822 /*
3823 * This routine is called when an inode create format structure is found in a
3824 * committed transaction in the log. Its purpose is to initialise the inodes
3825 * being allocated on disk. This requires us to get inode cluster buffers that
3826 * match the range to be initialised, stamp them with inode templates and write
3827 * them by delayed write so that subsequent modifications will hit the cached
3828 * buffer and only need writing out at the end of recovery.
3829 */
3830 STATIC int
3831 xlog_recover_do_icreate_pass2(
3832 struct xlog *log,
3833 struct list_head *buffer_list,
3834 xlog_recover_item_t *item)
3835 {
3836 struct xfs_mount *mp = log->l_mp;
3837 struct xfs_icreate_log *icl;
3838 xfs_agnumber_t agno;
3839 xfs_agblock_t agbno;
3840 unsigned int count;
3841 unsigned int isize;
3842 xfs_agblock_t length;
3843 int blks_per_cluster;
3844 int bb_per_cluster;
3845 int cancel_count;
3846 int nbufs;
3847 int i;
3848
3849 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3850 if (icl->icl_type != XFS_LI_ICREATE) {
3851 xfs_warn(log->l_mp, "%s: bad type", __func__);
3852 return -EINVAL;
3853 }
3854
3855 if (icl->icl_size != 1) {
3856 xfs_warn(log->l_mp, "%s: bad icl size", __func__);
3857 return -EINVAL;
3858 }
3859
3860 agno = be32_to_cpu(icl->icl_ag);
3861 if (agno >= mp->m_sb.sb_agcount) {
3862 xfs_warn(log->l_mp, "%s: bad agno", __func__);
3863 return -EINVAL;
3864 }
3865 agbno = be32_to_cpu(icl->icl_agbno);
3866 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3867 xfs_warn(log->l_mp, "%s: bad agbno", __func__);
3868 return -EINVAL;
3869 }
3870 isize = be32_to_cpu(icl->icl_isize);
3871 if (isize != mp->m_sb.sb_inodesize) {
3872 xfs_warn(log->l_mp, "%s: bad isize", __func__);
3873 return -EINVAL;
3874 }
3875 count = be32_to_cpu(icl->icl_count);
3876 if (!count) {
3877 xfs_warn(log->l_mp, "%s: bad count", __func__);
3878 return -EINVAL;
3879 }
3880 length = be32_to_cpu(icl->icl_length);
3881 if (!length || length >= mp->m_sb.sb_agblocks) {
3882 xfs_warn(log->l_mp, "%s: bad length", __func__);
3883 return -EINVAL;
3884 }
3885
3886 /*
3887 * The inode chunk is either full or sparse and we only support
3888 * m_ialloc_min_blks sized sparse allocations at this time.
3889 */
3890 if (length != mp->m_ialloc_blks &&
3891 length != mp->m_ialloc_min_blks) {
3892 xfs_warn(log->l_mp,
3893 "%s: unsupported chunk length", __FUNCTION__);
3894 return -EINVAL;
3895 }
3896
3897 /* verify inode count is consistent with extent length */
3898 if ((count >> mp->m_sb.sb_inopblog) != length) {
3899 xfs_warn(log->l_mp,
3900 "%s: inconsistent inode count and chunk length",
3901 __func__);
3902 return -EINVAL;
3903 }
3904
3905 /*
3906 * The icreate transaction can cover multiple cluster buffers and these
3907 * buffers could have been freed and reused. Check the individual
3908 * buffers for cancellation so we don't overwrite anything written after
3909 * a cancellation.
3910 */
3911 blks_per_cluster = xfs_icluster_size_fsb(mp);
3912 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3913 nbufs = length / blks_per_cluster;
3914 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3915 xfs_daddr_t daddr;
3916
3917 daddr = XFS_AGB_TO_DADDR(mp, agno,
3918 agbno + i * blks_per_cluster);
3919 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3920 cancel_count++;
3921 }
3922
3923 /*
3924 * We currently only use icreate for a single allocation at a time. This
3925 * means we should expect either all or none of the buffers to be
3926 * cancelled. Be conservative and skip replay if at least one buffer is
3927 * cancelled, but warn the user that something is awry if the buffers
3928 * are not consistent.
3929 *
3930 * XXX: This must be refined to only skip cancelled clusters once we use
3931 * icreate for multiple chunk allocations.
3932 */
3933 ASSERT(!cancel_count || cancel_count == nbufs);
3934 if (cancel_count) {
3935 if (cancel_count != nbufs)
3936 xfs_warn(mp,
3937 "WARNING: partial inode chunk cancellation, skipped icreate.");
3938 trace_xfs_log_recover_icreate_cancel(log, icl);
3939 return 0;
3940 }
3941
3942 trace_xfs_log_recover_icreate_recover(log, icl);
3943 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3944 length, be32_to_cpu(icl->icl_gen));
3945 }
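
/*
 * A worked example of the consistency checks above, with illustrative
 * geometry: 4096-byte blocks and 512-byte inodes give 8 inodes per block,
 * i.e. sb_inopblog = 3, so an icreate record with count = 256 must carry
 * length = 256 >> 3 = 32 blocks; with a 2-block inode cluster that is
 * nbufs = 32 / 2 = 16 cluster buffers to check for cancellation. A
 * stand-alone sketch of the same arithmetic (sketch_* names are
 * hypothetical):
 */
static int sketch_icreate_nbufs(unsigned int count, unsigned int length,
				unsigned int inopblog,
				unsigned int blks_per_cluster)
{
	/* the inode count must be consistent with the extent length */
	if ((count >> inopblog) != length)
		return -1;
	return length / blks_per_cluster;
}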
3946
3947 STATIC void
3948 xlog_recover_buffer_ra_pass2(
3949 struct xlog *log,
3950 struct xlog_recover_item *item)
3951 {
3952 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3953 struct xfs_mount *mp = log->l_mp;
3954
3955 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3956 buf_f->blf_len, buf_f->blf_flags)) {
3957 return;
3958 }
3959
3960 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3961 buf_f->blf_len, NULL);
3962 }
3963
3964 STATIC void
3965 xlog_recover_inode_ra_pass2(
3966 struct xlog *log,
3967 struct xlog_recover_item *item)
3968 {
3969 struct xfs_inode_log_format ilf_buf;
3970 struct xfs_inode_log_format *ilfp;
3971 struct xfs_mount *mp = log->l_mp;
3972 int error;
3973
3974 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3975 ilfp = item->ri_buf[0].i_addr;
3976 } else {
3977 ilfp = &ilf_buf;
3978 memset(ilfp, 0, sizeof(*ilfp));
3979 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3980 if (error)
3981 return;
3982 }
3983
3984 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3985 return;
3986
3987 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3988 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3989 }
3990
3991 STATIC void
3992 xlog_recover_dquot_ra_pass2(
3993 struct xlog *log,
3994 struct xlog_recover_item *item)
3995 {
3996 struct xfs_mount *mp = log->l_mp;
3997 struct xfs_disk_dquot *recddq;
3998 struct xfs_dq_logformat *dq_f;
3999 uint type;
4000 int len;
4001
4002
4003 if (mp->m_qflags == 0)
4004 return;
4005
4006 recddq = item->ri_buf[1].i_addr;
4007 if (recddq == NULL)
4008 return;
4009 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
4010 return;
4011
4012 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
4013 ASSERT(type);
4014 if (log->l_quotaoffs_flag & type)
4015 return;
4016
4017 dq_f = item->ri_buf[0].i_addr;
4018 ASSERT(dq_f);
4019 ASSERT(dq_f->qlf_len == 1);
4020
4021 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
4022 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
4023 return;
4024
4025 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
4026 &xfs_dquot_buf_ra_ops);
4027 }
4028
4029 STATIC void
4030 xlog_recover_ra_pass2(
4031 struct xlog *log,
4032 struct xlog_recover_item *item)
4033 {
4034 switch (ITEM_TYPE(item)) {
4035 case XFS_LI_BUF:
4036 xlog_recover_buffer_ra_pass2(log, item);
4037 break;
4038 case XFS_LI_INODE:
4039 xlog_recover_inode_ra_pass2(log, item);
4040 break;
4041 case XFS_LI_DQUOT:
4042 xlog_recover_dquot_ra_pass2(log, item);
4043 break;
4044 case XFS_LI_EFI:
4045 case XFS_LI_EFD:
4046 case XFS_LI_QUOTAOFF:
4047 case XFS_LI_RUI:
4048 case XFS_LI_RUD:
4049 case XFS_LI_CUI:
4050 case XFS_LI_CUD:
4051 case XFS_LI_BUI:
4052 case XFS_LI_BUD:
4053 default:
4054 break;
4055 }
4056 }
4057
4058 STATIC int
4059 xlog_recover_commit_pass1(
4060 struct xlog *log,
4061 struct xlog_recover *trans,
4062 struct xlog_recover_item *item)
4063 {
4064 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
4065
4066 switch (ITEM_TYPE(item)) {
4067 case XFS_LI_BUF:
4068 return xlog_recover_buffer_pass1(log, item);
4069 case XFS_LI_QUOTAOFF:
4070 return xlog_recover_quotaoff_pass1(log, item);
4071 case XFS_LI_INODE:
4072 case XFS_LI_EFI:
4073 case XFS_LI_EFD:
4074 case XFS_LI_DQUOT:
4075 case XFS_LI_ICREATE:
4076 case XFS_LI_RUI:
4077 case XFS_LI_RUD:
4078 case XFS_LI_CUI:
4079 case XFS_LI_CUD:
4080 case XFS_LI_BUI:
4081 case XFS_LI_BUD:
4082 /* nothing to do in pass 1 */
4083 return 0;
4084 default:
4085 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4086 __func__, ITEM_TYPE(item));
4087 ASSERT(0);
4088 return -EIO;
4089 }
4090 }
4091
4092 STATIC int
4093 xlog_recover_commit_pass2(
4094 struct xlog *log,
4095 struct xlog_recover *trans,
4096 struct list_head *buffer_list,
4097 struct xlog_recover_item *item)
4098 {
4099 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4100
4101 switch (ITEM_TYPE(item)) {
4102 case XFS_LI_BUF:
4103 return xlog_recover_buffer_pass2(log, buffer_list, item,
4104 trans->r_lsn);
4105 case XFS_LI_INODE:
4106 return xlog_recover_inode_pass2(log, buffer_list, item,
4107 trans->r_lsn);
4108 case XFS_LI_EFI:
4109 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4110 case XFS_LI_EFD:
4111 return xlog_recover_efd_pass2(log, item);
4112 case XFS_LI_RUI:
4113 return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4114 case XFS_LI_RUD:
4115 return xlog_recover_rud_pass2(log, item);
4116 case XFS_LI_CUI:
4117 return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4118 case XFS_LI_CUD:
4119 return xlog_recover_cud_pass2(log, item);
4120 case XFS_LI_BUI:
4121 return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4122 case XFS_LI_BUD:
4123 return xlog_recover_bud_pass2(log, item);
4124 case XFS_LI_DQUOT:
4125 return xlog_recover_dquot_pass2(log, buffer_list, item,
4126 trans->r_lsn);
4127 case XFS_LI_ICREATE:
4128 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4129 case XFS_LI_QUOTAOFF:
4130 /* nothing to do in pass 2 */
4131 return 0;
4132 default:
4133 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4134 __func__, ITEM_TYPE(item));
4135 ASSERT(0);
4136 return -EIO;
4137 }
4138 }
4139
4140 STATIC int
4141 xlog_recover_items_pass2(
4142 struct xlog *log,
4143 struct xlog_recover *trans,
4144 struct list_head *buffer_list,
4145 struct list_head *item_list)
4146 {
4147 struct xlog_recover_item *item;
4148 int error = 0;
4149
4150 list_for_each_entry(item, item_list, ri_list) {
4151 error = xlog_recover_commit_pass2(log, trans,
4152 buffer_list, item);
4153 if (error)
4154 return error;
4155 }
4156
4157 return error;
4158 }
4159
4160 /*
4161 * Perform the transaction.
4162 *
4163 * If the transaction modifies a buffer or inode, do it now. Otherwise,
4164 * EFIs and EFDs (and the other intent/done pairs) get queued up by adding entries into the AIL for them.
4165 */
4166 STATIC int
4167 xlog_recover_commit_trans(
4168 struct xlog *log,
4169 struct xlog_recover *trans,
4170 int pass,
4171 struct list_head *buffer_list)
4172 {
4173 int error = 0;
4174 int items_queued = 0;
4175 struct xlog_recover_item *item;
4176 struct xlog_recover_item *next;
4177 LIST_HEAD (ra_list);
4178 LIST_HEAD (done_list);
4179
4180 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4181
4182 hlist_del_init(&trans->r_list);
4183
4184 error = xlog_recover_reorder_trans(log, trans, pass);
4185 if (error)
4186 return error;
4187
4188 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4189 switch (pass) {
4190 case XLOG_RECOVER_PASS1:
4191 error = xlog_recover_commit_pass1(log, trans, item);
4192 break;
4193 case XLOG_RECOVER_PASS2:
4194 xlog_recover_ra_pass2(log, item);
4195 list_move_tail(&item->ri_list, &ra_list);
4196 items_queued++;
4197 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4198 error = xlog_recover_items_pass2(log, trans,
4199 buffer_list, &ra_list);
4200 list_splice_tail_init(&ra_list, &done_list);
4201 items_queued = 0;
4202 }
4203
4204 break;
4205 default:
4206 ASSERT(0);
4207 }
4208
4209 if (error)
4210 goto out;
4211 }
4212
4213 out:
4214 if (!list_empty(&ra_list)) {
4215 if (!error)
4216 error = xlog_recover_items_pass2(log, trans,
4217 buffer_list, &ra_list);
4218 list_splice_tail_init(&ra_list, &done_list);
4219 }
4220
4221 if (!list_empty(&done_list))
4222 list_splice_init(&done_list, &trans->r_itemq);
4223
4224 return error;
4225 }
4226
4227 STATIC void
4228 xlog_recover_add_item(
4229 struct list_head *head)
4230 {
4231 xlog_recover_item_t *item;
4232
4233 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4234 INIT_LIST_HEAD(&item->ri_list);
4235 list_add_tail(&item->ri_list, head);
4236 }
4237
4238 STATIC int
4239 xlog_recover_add_to_cont_trans(
4240 struct xlog *log,
4241 struct xlog_recover *trans,
4242 char *dp,
4243 int len)
4244 {
4245 xlog_recover_item_t *item;
4246 char *ptr, *old_ptr;
4247 int old_len;
4248
4249 /*
4250 * If the transaction is empty, the header was split across this and the
4251 * previous record. Copy the rest of the header.
4252 */
4253 if (list_empty(&trans->r_itemq)) {
4254 ASSERT(len <= sizeof(struct xfs_trans_header));
4255 if (len > sizeof(struct xfs_trans_header)) {
4256 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4257 return -EIO;
4258 }
4259
4260 xlog_recover_add_item(&trans->r_itemq);
4261 ptr = (char *)&trans->r_theader +
4262 sizeof(struct xfs_trans_header) - len;
4263 memcpy(ptr, dp, len);
4264 return 0;
4265 }
4266
4267 /* take the tail entry */
4268 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4269
4270 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4271 old_len = item->ri_buf[item->ri_cnt-1].i_len;
4272
4273 ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4274 memcpy(&ptr[old_len], dp, len);
4275 item->ri_buf[item->ri_cnt-1].i_len += len;
4276 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4277 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4278 return 0;
4279 }
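
/*
 * A stand-alone sketch of the append step above: a continuation ophdr
 * carries more bytes for a region that was split at a record boundary,
 * so the last region buffer is grown and the new bytes copied onto its
 * tail. Assumes realloc()/memcpy() from the C library (<stdlib.h>,
 * <string.h>) in place of kmem_realloc(); sketch_region is a
 * hypothetical type.
 */
struct sketch_region {
	char		*addr;
	int		len;
};

static int sketch_append_region(struct sketch_region *reg,
				const char *dp, int len)
{
	char	*ptr;

	ptr = realloc(reg->addr, reg->len + len);
	if (!ptr)
		return -1;
	memcpy(ptr + reg->len, dp, len);
	reg->addr = ptr;
	reg->len += len;
	return 0;
}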
4280
4281 /*
4282 * The next region to add is the start of a new region. It could be
4283 * a whole region or it could be the first part of a new region. Because
4284 * of this, the assumption here is that the type and size fields of all
4285 * format structures fit into the first 32 bits of the structure.
4286 *
4287 * This works because all regions must be 32 bit aligned. Therefore, we
4288 * either have both fields or we have neither field. In the case we have
4289 * neither field, the data part of the region is zero length. We only have
4290 * a log_op_header and can throw away the header since a new one will appear
4291 * later. If we have at least 4 bytes, then we can determine how many regions
4292 * will appear in the current log item.
4293 */
4294 STATIC int
4295 xlog_recover_add_to_trans(
4296 struct xlog *log,
4297 struct xlog_recover *trans,
4298 char *dp,
4299 int len)
4300 {
4301 struct xfs_inode_log_format *in_f; /* any will do */
4302 xlog_recover_item_t *item;
4303 char *ptr;
4304
4305 if (!len)
4306 return 0;
4307 if (list_empty(&trans->r_itemq)) {
4308 /* we need to catch log corruptions here */
4309 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4310 xfs_warn(log->l_mp, "%s: bad header magic number",
4311 __func__);
4312 ASSERT(0);
4313 return -EIO;
4314 }
4315
4316 if (len > sizeof(struct xfs_trans_header)) {
4317 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4318 ASSERT(0);
4319 return -EIO;
4320 }
4321
4322 /*
4323 * The transaction header can be arbitrarily split across op
4324 * records. If we don't have the whole thing here, copy what we
4325 * do have and handle the rest in the next record.
4326 */
4327 if (len == sizeof(struct xfs_trans_header))
4328 xlog_recover_add_item(&trans->r_itemq);
4329 memcpy(&trans->r_theader, dp, len);
4330 return 0;
4331 }
4332
4333 ptr = kmem_alloc(len, KM_SLEEP);
4334 memcpy(ptr, dp, len);
4335 in_f = (struct xfs_inode_log_format *)ptr;
4336
4337 /* take the tail entry */
4338 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4339 if (item->ri_total != 0 &&
4340 item->ri_total == item->ri_cnt) {
4341 /* tail item is in use, get a new one */
4342 xlog_recover_add_item(&trans->r_itemq);
4343 item = list_entry(trans->r_itemq.prev,
4344 xlog_recover_item_t, ri_list);
4345 }
4346
4347 if (item->ri_total == 0) { /* first region to be added */
4348 if (in_f->ilf_size == 0 ||
4349 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4350 xfs_warn(log->l_mp,
4351 "bad number of regions (%d) in inode log format",
4352 in_f->ilf_size);
4353 ASSERT(0);
4354 kmem_free(ptr);
4355 return -EIO;
4356 }
4357
4358 item->ri_total = in_f->ilf_size;
4359 item->ri_buf =
4360 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4361 KM_SLEEP);
4362 }
4363 ASSERT(item->ri_total > item->ri_cnt);
4364 /* Description region is ri_buf[0] */
4365 item->ri_buf[item->ri_cnt].i_addr = ptr;
4366 item->ri_buf[item->ri_cnt].i_len = len;
4367 item->ri_cnt++;
4368 trace_xfs_log_recover_item_add(log, trans, item, 0);
4369 return 0;
4370 }
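
/*
 * A minimal sketch of the "first 32 bits" layout assumption documented
 * above: every log item format structure begins with a 16-bit type and a
 * 16-bit region count, so four bytes are enough to size a new item.
 * sketch_item_hdr is a hypothetical stand-in for the real format
 * structures:
 */
struct sketch_item_hdr {
	unsigned short	type;	/* log item type, e.g. XFS_LI_INODE */
	unsigned short	size;	/* number of regions in this item */
};

static int sketch_regions_in_item(const char *dp, int len)
{
	const struct sketch_item_hdr *hdr;

	/* fewer than 4 bytes: a bare op header, no usable fields yet */
	if (len < (int)sizeof(*hdr))
		return 0;
	hdr = (const struct sketch_item_hdr *)dp;
	return hdr->size;
}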
4371
4372 /*
4373 * Free up any resources allocated by the transaction
4374 *
4375 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4376 */
4377 STATIC void
4378 xlog_recover_free_trans(
4379 struct xlog_recover *trans)
4380 {
4381 xlog_recover_item_t *item, *n;
4382 int i;
4383
4384 hlist_del_init(&trans->r_list);
4385
4386 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4387 /* Free the regions in the item. */
4388 list_del(&item->ri_list);
4389 for (i = 0; i < item->ri_cnt; i++)
4390 kmem_free(item->ri_buf[i].i_addr);
4391 /* Free the item itself */
4392 kmem_free(item->ri_buf);
4393 kmem_free(item);
4394 }
4395 /* Free the transaction recover structure */
4396 kmem_free(trans);
4397 }
4398
4399 /*
4400 * On error or completion, trans is freed.
4401 */
4402 STATIC int
4403 xlog_recovery_process_trans(
4404 struct xlog *log,
4405 struct xlog_recover *trans,
4406 char *dp,
4407 unsigned int len,
4408 unsigned int flags,
4409 int pass,
4410 struct list_head *buffer_list)
4411 {
4412 int error = 0;
4413 bool freeit = false;
4414
4415 /* mask off ophdr transaction container flags */
4416 flags &= ~XLOG_END_TRANS;
4417 if (flags & XLOG_WAS_CONT_TRANS)
4418 flags &= ~XLOG_CONTINUE_TRANS;
4419
4420 /*
4421 * Callees must not free the trans structure. We'll decide if we need to
4422 * free it or not based on the operation being done and its result.
4423 */
4424 switch (flags) {
4425 /* expected flag values */
4426 case 0:
4427 case XLOG_CONTINUE_TRANS:
4428 error = xlog_recover_add_to_trans(log, trans, dp, len);
4429 break;
4430 case XLOG_WAS_CONT_TRANS:
4431 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4432 break;
4433 case XLOG_COMMIT_TRANS:
4434 error = xlog_recover_commit_trans(log, trans, pass,
4435 buffer_list);
4436 /* success or fail, we are now done with this transaction. */
4437 freeit = true;
4438 break;
4439
4440 /* unexpected flag values */
4441 case XLOG_UNMOUNT_TRANS:
4442 /* just skip trans */
4443 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4444 freeit = true;
4445 break;
4446 case XLOG_START_TRANS:
4447 default:
4448 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4449 ASSERT(0);
4450 error = -EIO;
4451 break;
4452 }
4453 if (error || freeit)
4454 xlog_recover_free_trans(trans);
4455 return error;
4456 }
4457
4458 /*
4459 * Lookup the transaction recovery structure associated with the ID in the
4460 * current ophdr. If the transaction doesn't exist and the start flag is set in
4461 * the ophdr, then allocate a new transaction for future ID matches to find.
4462 * Either way, return what we found during the lookup - an existing transaction
4463 * or nothing.
4464 */
4465 STATIC struct xlog_recover *
4466 xlog_recover_ophdr_to_trans(
4467 struct hlist_head rhash[],
4468 struct xlog_rec_header *rhead,
4469 struct xlog_op_header *ohead)
4470 {
4471 struct xlog_recover *trans;
4472 xlog_tid_t tid;
4473 struct hlist_head *rhp;
4474
4475 tid = be32_to_cpu(ohead->oh_tid);
4476 rhp = &rhash[XLOG_RHASH(tid)];
4477 hlist_for_each_entry(trans, rhp, r_list) {
4478 if (trans->r_log_tid == tid)
4479 return trans;
4480 }
4481
4482 /*
4483 * skip over non-start transaction headers - we could be
4484 * processing slack space before the next transaction starts
4485 */
4486 if (!(ohead->oh_flags & XLOG_START_TRANS))
4487 return NULL;
4488
4489 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4490
4491 /*
4492 * This is a new transaction so allocate a new recovery container to
4493 * hold the recovery ops that will follow.
4494 */
4495 trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4496 trans->r_log_tid = tid;
4497 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4498 INIT_LIST_HEAD(&trans->r_itemq);
4499 INIT_HLIST_NODE(&trans->r_list);
4500 hlist_add_head(&trans->r_list, rhp);
4501
4502 /*
4503 * Nothing more to do for this ophdr. Items to be added to this new
4504 * transaction will be in subsequent ophdr containers.
4505 */
4506 return NULL;
4507 }
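
/*
 * A stand-alone sketch of the lookup-or-create behaviour above, with a
 * flat table standing in for the hash chains (sketch_* names are
 * hypothetical). Note the deliberate asymmetry: a newly created entry is
 * not returned, because the start ophdr that triggers the allocation
 * carries no item data of its own.
 */
struct sketch_trans {
	unsigned int	tid;
	int		in_use;
};

static struct sketch_trans *
sketch_ophdr_to_trans(struct sketch_trans *table, int nr,
		      unsigned int tid, int is_start)
{
	int	i, free_slot = -1;

	for (i = 0; i < nr; i++) {
		if (table[i].in_use && table[i].tid == tid)
			return table + i;
		if (!table[i].in_use)
			free_slot = i;
	}
	if (!is_start || free_slot < 0)
		return NULL;
	table[free_slot].tid = tid;
	table[free_slot].in_use = 1;
	return NULL;	/* ops for this trans arrive in later ophdrs */
}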
4508
4509 STATIC int
4510 xlog_recover_process_ophdr(
4511 struct xlog *log,
4512 struct hlist_head rhash[],
4513 struct xlog_rec_header *rhead,
4514 struct xlog_op_header *ohead,
4515 char *dp,
4516 char *end,
4517 int pass,
4518 struct list_head *buffer_list)
4519 {
4520 struct xlog_recover *trans;
4521 unsigned int len;
4522 int error;
4523
4524 /* Do we understand who wrote this op? */
4525 if (ohead->oh_clientid != XFS_TRANSACTION &&
4526 ohead->oh_clientid != XFS_LOG) {
4527 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4528 __func__, ohead->oh_clientid);
4529 ASSERT(0);
4530 return -EIO;
4531 }
4532
4533 /*
4534 * Check the ophdr contains all the data it is supposed to contain.
4535 */
4536 len = be32_to_cpu(ohead->oh_len);
4537 if (dp + len > end) {
4538 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4539 WARN_ON(1);
4540 return -EIO;
4541 }
4542
4543 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4544 if (!trans) {
4545 /* nothing to do, so skip over this ophdr */
4546 return 0;
4547 }
4548
4549 /*
4550 * The recovered buffer queue is drained only once we know that all
4551 * recovery items for the current LSN have been processed. This is
4552 * required because:
4553 *
4554 * - Buffer write submission updates the metadata LSN of the buffer.
4555 * - Log recovery skips items with a metadata LSN >= the current LSN of
4556 * the recovery item.
4557 * - Separate recovery items against the same metadata buffer can share
4558 * a current LSN. I.e., consider that the LSN of a recovery item is
4559 * defined as the starting LSN of the first record in which its
4560 * transaction appears, that a record can hold multiple transactions,
4561 * and/or that a transaction can span multiple records.
4562 *
4563 * In other words, we are allowed to submit a buffer from log recovery
4564 * once per current LSN. Otherwise, we may incorrectly skip recovery
4565 * items and cause corruption.
4566 *
4567 * We don't know up front whether buffers are updated multiple times per
4568 * LSN. Therefore, track the current LSN of each commit log record as it
4569 * is processed and drain the queue when it changes. Use commit records
4570 * because they are ordered correctly by the logging code.
4571 */
4572 if (log->l_recovery_lsn != trans->r_lsn &&
4573 ohead->oh_flags & XLOG_COMMIT_TRANS) {
4574 error = xfs_buf_delwri_submit(buffer_list);
4575 if (error)
4576 return error;
4577 log->l_recovery_lsn = trans->r_lsn;
4578 }
4579
4580 return xlog_recovery_process_trans(log, trans, dp, len,
4581 ohead->oh_flags, pass, buffer_list);
4582 }
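
/*
 * The drain rule explained above is a "flush when the batch key changes"
 * pattern: recovered buffers queued under one commit record LSN must be
 * submitted before items tagged with a newer LSN touch them again. A
 * stand-alone sketch with hypothetical sketch_* names:
 */
static void sketch_drain_on_new_lsn(unsigned long long *batch_lsn,
				    unsigned long long item_lsn,
				    void (*submit_queue)(void))
{
	if (*batch_lsn != item_lsn) {
		submit_queue();		/* drain the previous batch */
		*batch_lsn = item_lsn;	/* start accumulating a new one */
	}
}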
4583
4584 /*
4585 * There are two valid states of the r_state field. 0 indicates that the
4586 * transaction structure is in a normal state. We have either seen the
4587 * start of the transaction or the last operation we added was not a partial
4588 * operation. If the last operation we added to the transaction was a
4589 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4590 *
4591 * NOTE: skip LRs with 0 data length.
4592 */
4593 STATIC int
4594 xlog_recover_process_data(
4595 struct xlog *log,
4596 struct hlist_head rhash[],
4597 struct xlog_rec_header *rhead,
4598 char *dp,
4599 int pass,
4600 struct list_head *buffer_list)
4601 {
4602 struct xlog_op_header *ohead;
4603 char *end;
4604 int num_logops;
4605 int error;
4606
4607 end = dp + be32_to_cpu(rhead->h_len);
4608 num_logops = be32_to_cpu(rhead->h_num_logops);
4609
4610 /* check the log format matches our own - else we can't recover */
4611 if (xlog_header_check_recover(log->l_mp, rhead))
4612 return -EIO;
4613
4614 trace_xfs_log_recover_record(log, rhead, pass);
4615 while ((dp < end) && num_logops) {
4616
4617 ohead = (struct xlog_op_header *)dp;
4618 dp += sizeof(*ohead);
4619 ASSERT(dp <= end);
4620
4621 /* errors will abort recovery */
4622 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4623 dp, end, pass, buffer_list);
4624 if (error)
4625 return error;
4626
4627 dp += be32_to_cpu(ohead->oh_len);
4628 num_logops--;
4629 }
4630 return 0;
4631 }
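
/*
 * A stand-alone sketch of the record walk above: a record body is a
 * sequence of [op header][payload] pairs, so the cursor advances by the
 * header size plus the payload length on every step. sketch_ophdr is a
 * hypothetical reduction of struct xlog_op_header:
 */
struct sketch_ophdr {
	unsigned int	tid;	/* transaction id */
	unsigned int	len;	/* payload length in bytes */
};

static int sketch_walk_ops(char *dp, char *end,
			   int (*process)(struct sketch_ophdr *, char *))
{
	while (dp < end) {
		struct sketch_ophdr *ohead = (struct sketch_ophdr *)dp;
		int error;

		dp += sizeof(*ohead);
		if (dp + ohead->len > end)
			return -1;	/* payload overruns the record */
		error = process(ohead, dp);
		if (error)
			return error;
		dp += ohead->len;
	}
	return 0;
}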
4632
4633 /* Recover the EFI if necessary. */
4634 STATIC int
4635 xlog_recover_process_efi(
4636 struct xfs_mount *mp,
4637 struct xfs_ail *ailp,
4638 struct xfs_log_item *lip)
4639 {
4640 struct xfs_efi_log_item *efip;
4641 int error;
4642
4643 /*
4644 * Skip EFIs that we've already processed.
4645 */
4646 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4647 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4648 return 0;
4649
4650 spin_unlock(&ailp->ail_lock);
4651 error = xfs_efi_recover(mp, efip);
4652 spin_lock(&ailp->ail_lock);
4653
4654 return error;
4655 }
4656
4657 /* Release the EFI since we're cancelling everything. */
4658 STATIC void
4659 xlog_recover_cancel_efi(
4660 struct xfs_mount *mp,
4661 struct xfs_ail *ailp,
4662 struct xfs_log_item *lip)
4663 {
4664 struct xfs_efi_log_item *efip;
4665
4666 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4667
4668 spin_unlock(&ailp->ail_lock);
4669 xfs_efi_release(efip);
4670 spin_lock(&ailp->ail_lock);
4671 }
4672
4673 /* Recover the RUI if necessary. */
4674 STATIC int
4675 xlog_recover_process_rui(
4676 struct xfs_mount *mp,
4677 struct xfs_ail *ailp,
4678 struct xfs_log_item *lip)
4679 {
4680 struct xfs_rui_log_item *ruip;
4681 int error;
4682
4683 /*
4684 * Skip RUIs that we've already processed.
4685 */
4686 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4687 if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4688 return 0;
4689
4690 spin_unlock(&ailp->ail_lock);
4691 error = xfs_rui_recover(mp, ruip);
4692 spin_lock(&ailp->ail_lock);
4693
4694 return error;
4695 }
4696
4697 /* Release the RUI since we're cancelling everything. */
4698 STATIC void
4699 xlog_recover_cancel_rui(
4700 struct xfs_mount *mp,
4701 struct xfs_ail *ailp,
4702 struct xfs_log_item *lip)
4703 {
4704 struct xfs_rui_log_item *ruip;
4705
4706 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4707
4708 spin_unlock(&ailp->ail_lock);
4709 xfs_rui_release(ruip);
4710 spin_lock(&ailp->ail_lock);
4711 }
4712
4713 /* Recover the CUI if necessary. */
4714 STATIC int
4715 xlog_recover_process_cui(
4716 struct xfs_mount *mp,
4717 struct xfs_ail *ailp,
4718 struct xfs_log_item *lip,
4719 struct xfs_defer_ops *dfops)
4720 {
4721 struct xfs_cui_log_item *cuip;
4722 int error;
4723
4724 /*
4725 * Skip CUIs that we've already processed.
4726 */
4727 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4728 if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4729 return 0;
4730
4731 spin_unlock(&ailp->ail_lock);
4732 error = xfs_cui_recover(mp, cuip, dfops);
4733 spin_lock(&ailp->ail_lock);
4734
4735 return error;
4736 }
4737
4738 /* Release the CUI since we're cancelling everything. */
4739 STATIC void
4740 xlog_recover_cancel_cui(
4741 struct xfs_mount *mp,
4742 struct xfs_ail *ailp,
4743 struct xfs_log_item *lip)
4744 {
4745 struct xfs_cui_log_item *cuip;
4746
4747 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4748
4749 spin_unlock(&ailp->ail_lock);
4750 xfs_cui_release(cuip);
4751 spin_lock(&ailp->ail_lock);
4752 }
4753
4754 /* Recover the BUI if necessary. */
4755 STATIC int
4756 xlog_recover_process_bui(
4757 struct xfs_mount *mp,
4758 struct xfs_ail *ailp,
4759 struct xfs_log_item *lip,
4760 struct xfs_defer_ops *dfops)
4761 {
4762 struct xfs_bui_log_item *buip;
4763 int error;
4764
4765 /*
4766 * Skip BUIs that we've already processed.
4767 */
4768 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4769 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4770 return 0;
4771
4772 spin_unlock(&ailp->ail_lock);
4773 error = xfs_bui_recover(mp, buip, dfops);
4774 spin_lock(&ailp->ail_lock);
4775
4776 return error;
4777 }
4778
4779 /* Release the BUI since we're cancelling everything. */
4780 STATIC void
4781 xlog_recover_cancel_bui(
4782 struct xfs_mount *mp,
4783 struct xfs_ail *ailp,
4784 struct xfs_log_item *lip)
4785 {
4786 struct xfs_bui_log_item *buip;
4787
4788 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4789
4790 spin_unlock(&ailp->ail_lock);
4791 xfs_bui_release(buip);
4792 spin_lock(&ailp->ail_lock);
4793 }
4794
4795 /* Is this log item a deferred action intent? */
4796 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4797 {
4798 switch (lip->li_type) {
4799 case XFS_LI_EFI:
4800 case XFS_LI_RUI:
4801 case XFS_LI_CUI:
4802 case XFS_LI_BUI:
4803 return true;
4804 default:
4805 return false;
4806 }
4807 }
4808
4809 /* Take all the collected deferred ops and finish them in order. */
4810 static int
4811 xlog_finish_defer_ops(
4812 struct xfs_mount *mp,
4813 struct xfs_defer_ops *dfops)
4814 {
4815 struct xfs_trans *tp;
4816 int64_t freeblks;
4817 uint resblks;
4818 int error;
4819
4820 /*
4821 * We're finishing the defer_ops that accumulated as a result of
4822 * recovering unfinished intent items during log recovery. We
4823 * reserve an itruncate transaction because it is the largest
4824 * permanent transaction type. Since we're the only user of the fs
4825 * right now, take 93% (15/16) of the available free blocks. Use
4826 * weird math to avoid a 64-bit division.
4827 */
4828 freeblks = percpu_counter_sum(&mp->m_fdblocks);
4829 if (freeblks <= 0)
4830 return -ENOSPC;
4831 /* multiply in 64 bits first so a large freeblks cannot overflow */
4832 resblks = min_t(int64_t, UINT_MAX, (freeblks * 15) >> 4);
4833 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4834 0, XFS_TRANS_RESERVE, &tp);
4835 if (error)
4836 return error;
4837
4838 error = xfs_defer_finish(&tp, dfops);
4839 if (error)
4840 goto out_cancel;
4841
4842 return xfs_trans_commit(tp);
4843
4844 out_cancel:
4845 xfs_trans_cancel(tp);
4846 return error;
4847 }
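
/*
 * A stand-alone sketch of the 15/16 reservation maths above: multiply
 * first, in 64 bits, then shift right by four, which takes 93.75% of the
 * free space without any 64-bit division; the result is then clamped to
 * what fits in an unsigned int. sketch_reserve_blocks is a hypothetical
 * name.
 */
static unsigned int sketch_reserve_blocks(long long freeblks)
{
	long long	resblks;

	if (freeblks <= 0)
		return 0;
	resblks = (freeblks * 15) >> 4;		/* 15/16 == 93.75% */
	if (resblks > 0xffffffffLL)
		resblks = 0xffffffffLL;		/* clamp to UINT_MAX */
	return (unsigned int)resblks;
}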
4848
4849 /*
4850 * When this is called, all of the log intent items which did not have
4851 * corresponding log done items should be in the AIL. What we do now
4852 * is update the data structures associated with each one.
4853 *
4854 * Since we process the log intent items in normal transactions, they
4855 * will be removed at some point after the commit. This prevents us
4856 * from just walking down the list processing each one. We'll use a
4857 * flag in the intent item to skip those that we've already processed
4858 * and use the AIL iteration mechanism's generation count to try to
4859 * speed this up at least a bit.
4860 *
4861 * When we start, we know that the intents are the only things in the
4862 * AIL. As we process them, however, other items are added to the
4863 * AIL.
4864 */
4865 STATIC int
4866 xlog_recover_process_intents(
4867 struct xlog *log)
4868 {
4869 struct xfs_defer_ops dfops;
4870 struct xfs_ail_cursor cur;
4871 struct xfs_log_item *lip;
4872 struct xfs_ail *ailp;
4873 xfs_fsblock_t firstfsb;
4874 int error = 0;
4875 #if defined(DEBUG) || defined(XFS_WARN)
4876 xfs_lsn_t last_lsn;
4877 #endif
4878
4879 ailp = log->l_ailp;
4880 spin_lock(&ailp->ail_lock);
4881 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4882 #if defined(DEBUG) || defined(XFS_WARN)
4883 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4884 #endif
4885 xfs_defer_init(&dfops, &firstfsb);
4886 while (lip != NULL) {
4887 /*
4888 * We're done when we see something other than an intent.
4889 * There should be no intents left in the AIL now.
4890 */
4891 if (!xlog_item_is_intent(lip)) {
4892 #ifdef DEBUG
4893 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4894 ASSERT(!xlog_item_is_intent(lip));
4895 #endif
4896 break;
4897 }
4898
4899 /*
4900 * We should never see a redo item with a LSN higher than
4901 * the last transaction we found in the log at the start
4902 * of recovery.
4903 */
4904 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4905
4906 /*
4907 * NOTE: If your intent processing routine can create more
4908 * deferred ops, you /must/ attach them to the dfops in this
4909 * routine or else those subsequent intents will get
4910 * replayed in the wrong order!
4911 */
4912 switch (lip->li_type) {
4913 case XFS_LI_EFI:
4914 error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4915 break;
4916 case XFS_LI_RUI:
4917 error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4918 break;
4919 case XFS_LI_CUI:
4920 error = xlog_recover_process_cui(log->l_mp, ailp, lip,
4921 &dfops);
4922 break;
4923 case XFS_LI_BUI:
4924 error = xlog_recover_process_bui(log->l_mp, ailp, lip,
4925 &dfops);
4926 break;
4927 }
4928 if (error)
4929 goto out;
4930 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4931 }
4932 out:
4933 xfs_trans_ail_cursor_done(&cur);
4934 spin_unlock(&ailp->ail_lock);
4935 if (error)
4936 xfs_defer_cancel(&dfops);
4937 else
4938 error = xlog_finish_defer_ops(log->l_mp, &dfops);
4939
4940 return error;
4941 }
4942
4943 /*
4944 * A cancel occurs when the mount has failed and we're bailing out.
4945 * Release all pending log intent items so they don't pin the AIL.
4946 */
4947 STATIC int
4948 xlog_recover_cancel_intents(
4949 struct xlog *log)
4950 {
4951 struct xfs_log_item *lip;
4952 int error = 0;
4953 struct xfs_ail_cursor cur;
4954 struct xfs_ail *ailp;
4955
4956 ailp = log->l_ailp;
4957 spin_lock(&ailp->ail_lock);
4958 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4959 while (lip != NULL) {
4960 /*
4961 * We're done when we see something other than an intent.
4962 * There should be no intents left in the AIL now.
4963 */
4964 if (!xlog_item_is_intent(lip)) {
4965 #ifdef DEBUG
4966 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4967 ASSERT(!xlog_item_is_intent(lip));
4968 #endif
4969 break;
4970 }
4971
4972 switch (lip->li_type) {
4973 case XFS_LI_EFI:
4974 xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4975 break;
4976 case XFS_LI_RUI:
4977 xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4978 break;
4979 case XFS_LI_CUI:
4980 xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4981 break;
4982 case XFS_LI_BUI:
4983 xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4984 break;
4985 }
4986
4987 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4988 }
4989
4990 xfs_trans_ail_cursor_done(&cur);
4991 spin_unlock(&ailp->ail_lock);
4992 return error;
4993 }
4994
4995 /*
4996 * This routine performs a transaction to null out a bad inode pointer
4997 * in an agi unlinked inode hash bucket.
4998 */
4999 STATIC void
5000 xlog_recover_clear_agi_bucket(
5001 xfs_mount_t *mp,
5002 xfs_agnumber_t agno,
5003 int bucket)
5004 {
5005 xfs_trans_t *tp;
5006 xfs_agi_t *agi;
5007 xfs_buf_t *agibp;
5008 int offset;
5009 int error;
5010
5011 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
5012 if (error)
5013 goto out_error;
5014
5015 error = xfs_read_agi(mp, tp, agno, &agibp);
5016 if (error)
5017 goto out_abort;
5018
5019 agi = XFS_BUF_TO_AGI(agibp);
5020 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
5021 offset = offsetof(xfs_agi_t, agi_unlinked) +
5022 (sizeof(xfs_agino_t) * bucket);
5023 xfs_trans_log_buf(tp, agibp, offset,
5024 (offset + sizeof(xfs_agino_t) - 1));
5025
5026 error = xfs_trans_commit(tp);
5027 if (error)
5028 goto out_error;
5029 return;
5030
5031 out_abort:
5032 xfs_trans_cancel(tp);
5033 out_error:
5034 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
5035 return;
5036 }
5037
5038 STATIC xfs_agino_t
5039 xlog_recover_process_one_iunlink(
5040 struct xfs_mount *mp,
5041 xfs_agnumber_t agno,
5042 xfs_agino_t agino,
5043 int bucket)
5044 {
5045 struct xfs_buf *ibp;
5046 struct xfs_dinode *dip;
5047 struct xfs_inode *ip;
5048 xfs_ino_t ino;
5049 int error;
5050
5051 ino = XFS_AGINO_TO_INO(mp, agno, agino);
5052 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
5053 if (error)
5054 goto fail;
5055
5056 /*
5057 * Get the on disk inode to find the next inode in the bucket.
5058 */
5059 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
5060 if (error)
5061 goto fail_iput;
5062
5063 xfs_iflags_clear(ip, XFS_IRECOVERY);
5064 ASSERT(VFS_I(ip)->i_nlink == 0);
5065 ASSERT(VFS_I(ip)->i_mode != 0);
5066
5067 /* setup for the next pass */
5068 agino = be32_to_cpu(dip->di_next_unlinked);
5069 xfs_buf_relse(ibp);
5070
5071 /*
5072 * Prevent any DMAPI event from being sent when the reference on
5073 * the inode is dropped.
5074 */
5075 ip->i_d.di_dmevmask = 0;
5076
5077 IRELE(ip);
5078 return agino;
5079
5080 fail_iput:
5081 IRELE(ip);
5082 fail:
5083 /*
5084 * We can't read in the inode this bucket points to, or this inode
5085 * is messed up. Just ditch this bucket of inodes. We will lose
5086 * some inodes and space, but at least we won't hang.
5087 *
5088 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5089 * clear the inode pointer in the bucket.
5090 */
5091 xlog_recover_clear_agi_bucket(mp, agno, bucket);
5092 return NULLAGINO;
5093 }
5094
5095 /*
5096 * xlog_iunlink_recover
5097 *
5098 * This is called during recovery to process any inodes which
5099 * we unlinked but not freed when the system crashed. These
5100 * inodes will be on the lists in the AGI blocks. What we do
5101 * here is scan all the AGIs and fully truncate and free any
5102 * inodes found on the lists. Each inode is removed from the
5103 * lists when it has been fully truncated and is freed. The
5104 * freeing of the inode and its removal from the list must be
5105 * atomic.
5106 */
5107 STATIC void
5108 xlog_recover_process_iunlinks(
5109 struct xlog *log)
5110 {
5111 xfs_mount_t *mp;
5112 xfs_agnumber_t agno;
5113 xfs_agi_t *agi;
5114 xfs_buf_t *agibp;
5115 xfs_agino_t agino;
5116 int bucket;
5117 int error;
5118
5119 mp = log->l_mp;
5120
5121 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5122 /*
5123 * Find the agi for this ag.
5124 */
5125 error = xfs_read_agi(mp, NULL, agno, &agibp);
5126 if (error) {
5127 /*
5128 * AGI is b0rked. Don't process it.
5129 *
5130 * We should probably mark the filesystem as corrupt
5131 * after we've recovered all the ag's we can....
5132 */
5133 continue;
5134 }
5135 /*
5136 * Unlock the buffer so that it can be acquired in the normal
5137 * course of the transaction to truncate and free each inode.
5138 * Because we are not racing with anyone else here for the AGI
5139 * buffer, we don't even need to hold it locked to read the
5140 * initial unlinked bucket entries out of the buffer. We keep a
5141 * buffer reference, though, so that it stays pinned in memory
5142 * while we need the buffer.
5143 */
5144 agi = XFS_BUF_TO_AGI(agibp);
5145 xfs_buf_unlock(agibp);
5146
5147 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5148 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5149 while (agino != NULLAGINO) {
5150 agino = xlog_recover_process_one_iunlink(mp,
5151 agno, agino, bucket);
5152 }
5153 }
5154 xfs_buf_rele(agibp);
5155 }
5156 }
5157
5158 STATIC int
5159 xlog_unpack_data(
5160 struct xlog_rec_header *rhead,
5161 char *dp,
5162 struct xlog *log)
5163 {
5164 int i, j, k;
5165
5166 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5167 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5168 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5169 dp += BBSIZE;
5170 }
5171
5172 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5173 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5174 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5175 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5176 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5177 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5178 dp += BBSIZE;
5179 }
5180 }
5181
5182 return 0;
5183 }
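
/*
 * The loops above undo the "cycle stamping" done at write time: the
 * first four bytes of every 512-byte basic block in the record body are
 * overwritten on disk with the cycle number so torn writes can be
 * detected, and the displaced bytes are saved in h_cycle_data[] (and,
 * for v2 logs, in extended headers). A stand-alone sketch of the index
 * maths for block i, assuming 64 saved words per header, i.e.
 * XLOG_HEADER_CYCLE_SIZE / BBSIZE = 32768 / 512:
 */
static unsigned int sketch_saved_cycle_word(const unsigned int saved[][64],
					    unsigned int i)
{
	unsigned int	j = i / 64;	/* which (extended) header */
	unsigned int	k = i % 64;	/* slot within that header */

	return saved[j][k];
}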
5184
5185 /*
5186 * CRC check, unpack and process a log record.
5187 */
5188 STATIC int
5189 xlog_recover_process(
5190 struct xlog *log,
5191 struct hlist_head rhash[],
5192 struct xlog_rec_header *rhead,
5193 char *dp,
5194 int pass,
5195 struct list_head *buffer_list)
5196 {
5197 int error;
5198 __le32 old_crc = rhead->h_crc;
5199 __le32 crc;
5200
5201
5202 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5203
5204 /*
5205 * Nothing else to do if this is a CRC verification pass. Just return
5206 * if this is a record with a non-zero crc. Unfortunately, mkfs always
5207 * sets old_crc to 0 so we must consider this valid even on v5 supers.
5208 * Otherwise, return EFSBADCRC on failure so the callers up the stack
5209 * know precisely what failed.
5210 */
5211 if (pass == XLOG_RECOVER_CRCPASS) {
5212 if (old_crc && crc != old_crc)
5213 return -EFSBADCRC;
5214 return 0;
5215 }
5216
5217 /*
5218 * We're in the normal recovery path. On a CRC mismatch, warn only if the
5219 * CRC in the header is non-zero or the filesystem has CRCs enabled. The
5220 * zero CRC check prevents advisory warnings from being emitted when
5221 * upgrading the kernel from one that does not add CRCs by default.
5222 */
5223 if (crc != old_crc) {
5224 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5225 xfs_alert(log->l_mp,
5226 "log record CRC mismatch: found 0x%x, expected 0x%x.",
5227 le32_to_cpu(old_crc),
5228 le32_to_cpu(crc));
5229 xfs_hex_dump(dp, 32);
5230 }
5231
5232 /*
5233 * If the filesystem is CRC enabled, this mismatch becomes a
5234 * fatal log corruption failure.
5235 */
5236 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5237 return -EFSCORRUPTED;
5238 }
5239
5240 error = xlog_unpack_data(rhead, dp, log);
5241 if (error)
5242 return error;
5243
5244 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5245 buffer_list);
5246 }
5247
5248 STATIC int
5249 xlog_valid_rec_header(
5250 struct xlog *log,
5251 struct xlog_rec_header *rhead,
5252 xfs_daddr_t blkno)
5253 {
5254 int hlen;
5255
5256 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5257 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5258 XFS_ERRLEVEL_LOW, log->l_mp);
5259 return -EFSCORRUPTED;
5260 }
5261 if (unlikely(
5262 (!rhead->h_version ||
5263 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5264 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5265 __func__, be32_to_cpu(rhead->h_version));
5266 return -EIO;
5267 }
5268
5269 /* LR body must have data or it wouldn't have been written */
5270 hlen = be32_to_cpu(rhead->h_len);
5271 if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
5272 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5273 XFS_ERRLEVEL_LOW, log->l_mp);
5274 return -EFSCORRUPTED;
5275 }
5276 if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
5277 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5278 XFS_ERRLEVEL_LOW, log->l_mp);
5279 return -EFSCORRUPTED;
5280 }
5281 return 0;
5282 }
5283
5284 /*
5285 * Read the log from tail to head and process the log records found.
5286 * Handle the two cases where the tail and head are in the same cycle
5287 * and where the active portion of the log wraps around the end of
5288 * the physical log separately. The pass parameter is passed through
5289 * to the routines called to process the data and is not looked at
5290 * here.
5291 */
5292 STATIC int
5293 xlog_do_recovery_pass(
5294 struct xlog *log,
5295 xfs_daddr_t head_blk,
5296 xfs_daddr_t tail_blk,
5297 int pass,
5298 xfs_daddr_t *first_bad) /* out: first bad log rec */
5299 {
5300 xlog_rec_header_t *rhead;
5301 xfs_daddr_t blk_no, rblk_no;
5302 xfs_daddr_t rhead_blk;
5303 char *offset;
5304 xfs_buf_t *hbp, *dbp;
5305 int error = 0, h_size, h_len;
5306 int error2 = 0;
5307 int bblks, split_bblks;
5308 int hblks, split_hblks, wrapped_hblks;
5309 int i;
5310 struct hlist_head rhash[XLOG_RHASH_SIZE];
5311 LIST_HEAD (buffer_list);
5312
5313 ASSERT(head_blk != tail_blk);
5314 blk_no = rhead_blk = tail_blk;
5315
5316 for (i = 0; i < XLOG_RHASH_SIZE; i++)
5317 INIT_HLIST_HEAD(&rhash[i]);
5318
5319 /*
5320 * Read the header of the tail block and get the iclog buffer size from
5321 * h_size. Use this to tell how many sectors make up the log header.
5322 */
5323 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5324 /*
5325 * When using variable length iclogs, read first sector of
5326 * iclog header and extract the header size from it. Get a
5327 * new hbp that is the correct size.
5328 */
5329 hbp = xlog_get_bp(log, 1);
5330 if (!hbp)
5331 return -ENOMEM;
5332
5333 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5334 if (error)
5335 goto bread_err1;
5336
5337 rhead = (xlog_rec_header_t *)offset;
5338 error = xlog_valid_rec_header(log, rhead, tail_blk);
5339 if (error)
5340 goto bread_err1;
5341
5342 /*
5343 * xfsprogs has a bug where record length is based on lsunit but
5344 * h_size (iclog size) is hardcoded to 32k. Now that we
5345 * unconditionally CRC verify the unmount record, this means the
5346 * log buffer can be too small for the record and cause an
5347 * overrun.
5348 *
5349 * Detect this condition here. Use lsunit for the buffer size as
5350 * long as this looks like the mkfs case. Otherwise, return an
5351 * error to avoid a buffer overrun.
5352 */
5353 h_size = be32_to_cpu(rhead->h_size);
5354 h_len = be32_to_cpu(rhead->h_len);
5355 if (h_len > h_size) {
5356 if (h_len <= log->l_mp->m_logbsize &&
5357 be32_to_cpu(rhead->h_num_logops) == 1) {
5358 xfs_warn(log->l_mp,
5359 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5360 h_size, log->l_mp->m_logbsize);
5361 h_size = log->l_mp->m_logbsize;
5362 } else
5363 return -EFSCORRUPTED;
5364 }
5365
5366 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5367 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5368 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5369 if (h_size % XLOG_HEADER_CYCLE_SIZE)
5370 hblks++;
5371 xlog_put_bp(hbp);
5372 hbp = xlog_get_bp(log, hblks);
5373 } else {
5374 hblks = 1;
5375 }
5376 } else {
5377 ASSERT(log->l_sectBBsize == 1);
5378 hblks = 1;
5379 hbp = xlog_get_bp(log, 1);
5380 h_size = XLOG_BIG_RECORD_BSIZE;
5381 }
5382
5383 if (!hbp)
5384 return -ENOMEM;
5385 dbp = xlog_get_bp(log, BTOBB(h_size));
5386 if (!dbp) {
5387 xlog_put_bp(hbp);
5388 return -ENOMEM;
5389 }
5390
5391 memset(rhash, 0, sizeof(rhash));
5392 if (tail_blk > head_blk) {
5393 /*
5394 * Perform recovery around the end of the physical log.
5395 * When the head is not on the same cycle number as the tail,
5396 * we can't do a sequential recovery.
5397 */
5398 while (blk_no < log->l_logBBsize) {
5399 /*
5400 * Check for header wrapping around physical end-of-log
5401 */
5402 offset = hbp->b_addr;
5403 split_hblks = 0;
5404 wrapped_hblks = 0;
5405 if (blk_no + hblks <= log->l_logBBsize) {
5406 /* Read header in one read */
5407 error = xlog_bread(log, blk_no, hblks, hbp,
5408 &offset);
5409 if (error)
5410 goto bread_err2;
5411 } else {
5412 /* This LR is split across physical log end */
5413 if (blk_no != log->l_logBBsize) {
5414 /* some data before physical log end */
5415 ASSERT(blk_no <= INT_MAX);
5416 split_hblks = log->l_logBBsize - (int)blk_no;
5417 ASSERT(split_hblks > 0);
5418 error = xlog_bread(log, blk_no,
5419 split_hblks, hbp,
5420 &offset);
5421 if (error)
5422 goto bread_err2;
5423 }
5424
5425 /*
5426 * Note: this black magic still works with
5427 * large sector sizes (non-512) only because:
5428 * - we increased the buffer size originally
5429 * by 1 sector giving us enough extra space
5430 * for the second read;
5431 * - the log start is guaranteed to be sector
5432 * aligned;
5433 * - we read the log end (LR header start)
5434 * _first_, then the log start (LR header end)
5435 * - order is important.
5436 */
5437 wrapped_hblks = hblks - split_hblks;
5438 error = xlog_bread_offset(log, 0,
5439 wrapped_hblks, hbp,
5440 offset + BBTOB(split_hblks));
5441 if (error)
5442 goto bread_err2;
5443 }
5444 rhead = (xlog_rec_header_t *)offset;
5445 error = xlog_valid_rec_header(log, rhead,
5446 split_hblks ? blk_no : 0);
5447 if (error)
5448 goto bread_err2;
5449
5450 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5451 blk_no += hblks;
5452
5453 /*
5454 * Read the log record data in multiple reads if it
5455 * wraps around the end of the log. Note that if the
5456 * header already wrapped, blk_no could point past the
5457 * end of the log. The record data is contiguous in
5458 * that case.
5459 */
5460 if (blk_no + bblks <= log->l_logBBsize ||
5461 blk_no >= log->l_logBBsize) {
5462 /* mod blk_no in case the header wrapped and
5463 * pushed it beyond the end of the log */
5464 rblk_no = do_mod(blk_no, log->l_logBBsize);
5465 error = xlog_bread(log, rblk_no, bblks, dbp,
5466 &offset);
5467 if (error)
5468 goto bread_err2;
5469 } else {
5470 /* This log record is split across the
5471 * physical end of log */
5472 offset = dbp->b_addr;
5473 split_bblks = 0;
5474 if (blk_no != log->l_logBBsize) {
5475 /* some data is before the physical
5476 * end of log */
5477 ASSERT(!wrapped_hblks);
5478 ASSERT(blk_no <= INT_MAX);
5479 split_bblks =
5480 log->l_logBBsize - (int)blk_no;
5481 ASSERT(split_bblks > 0);
5482 error = xlog_bread(log, blk_no,
5483 split_bblks, dbp,
5484 &offset);
5485 if (error)
5486 goto bread_err2;
5487 }
5488
5489 /*
5490 * Note: this black magic still works with
5491 * large sector sizes (non-512) only because:
5492 * - we increased the buffer size originally
5493 * by 1 sector giving us enough extra space
5494 * for the second read;
5495 * - the log start is guaranteed to be sector
5496 * aligned;
5497 * - we read the log end (LR header start)
5498 * _first_, then the log start (LR header end)
5499 * - order is important.
5500 */
5501 error = xlog_bread_offset(log, 0,
5502 bblks - split_bblks, dbp,
5503 offset + BBTOB(split_bblks));
5504 if (error)
5505 goto bread_err2;
5506 }
5507
5508 error = xlog_recover_process(log, rhash, rhead, offset,
5509 pass, &buffer_list);
5510 if (error)
5511 goto bread_err2;
5512
5513 blk_no += bblks;
5514 rhead_blk = blk_no;
5515 }
5516
5517 ASSERT(blk_no >= log->l_logBBsize);
5518 blk_no -= log->l_logBBsize;
5519 rhead_blk = blk_no;
5520 }
5521
5522 /* read first part of physical log */
5523 while (blk_no < head_blk) {
5524 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5525 if (error)
5526 goto bread_err2;
5527
5528 rhead = (xlog_rec_header_t *)offset;
5529 error = xlog_valid_rec_header(log, rhead, blk_no);
5530 if (error)
5531 goto bread_err2;
5532
5533 /* blocks in data section */
5534 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5535 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
5536 &offset);
5537 if (error)
5538 goto bread_err2;
5539
5540 error = xlog_recover_process(log, rhash, rhead, offset, pass,
5541 &buffer_list);
5542 if (error)
5543 goto bread_err2;
5544
5545 blk_no += bblks + hblks;
5546 rhead_blk = blk_no;
5547 }
5548
5549 bread_err2:
5550 xlog_put_bp(dbp);
5551 bread_err1:
5552 xlog_put_bp(hbp);
5553
5554 /*
5555 * Submit buffers that have been added from the last record processed,
5556 * regardless of error status.
5557 */
5558 if (!list_empty(&buffer_list))
5559 error2 = xfs_buf_delwri_submit(&buffer_list);
5560
5561 if (error && first_bad)
5562 *first_bad = rhead_blk;
5563
5564 /*
5565 * Transactions are freed at commit time but transactions without commit
5566 * records on disk are never committed. Free any that may be left in the
5567 * hash table.
5568 */
5569 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5570 struct hlist_node *tmp;
5571 struct xlog_recover *trans;
5572
5573 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5574 xlog_recover_free_trans(trans);
5575 }
5576
5577 return error ? error : error2;
5578 }
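/*
 * The wrap-around reads in xlog_do_recovery_pass() reduce to a small
 * split computation. Below is a minimal sketch of that arithmetic
 * with hypothetical names (example_split_counts is not a kernel
 * function); it assumes 0 <= blk_no <= log_size.
 */
#if 0
static void
example_split_counts(
	int	log_size,	/* log size in basic blocks */
	int	blk_no,		/* starting block of the I/O */
	int	nbblks,		/* I/O length in basic blocks */
	int	*split,		/* blocks to read at blk_no */
	int	*wrapped)	/* blocks to read from block 0 */
{
	if (blk_no + nbblks <= log_size) {
		/* No wrap: a single contiguous read suffices. */
		*split = nbblks;
		*wrapped = 0;
	} else {
		/*
		 * Read what fits before the physical end of the log
		 * first, then wrap to the front for the remainder.
		 * blk_no == log_size leaves split == 0, matching the
		 * blk_no != l_logBBsize special case above.
		 */
		*split = log_size - blk_no;
		*wrapped = nbblks - *split;
	}
}
#endif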
5579
5580 /*
5581 * Do the recovery of the log. We actually do this in two phases.
5582 * The two passes are necessary to support cancellation of records
5583 * written into the log. The first pass determines which records
5584 * have been cancelled, and the second pass replays log items
5585 * normally, skipping those which have been cancelled. The handling
5586 * of the replay and cancellations takes place in the log item
5587 * type-specific routines.
5588 *
5589 * The table of items which have cancel records in the log is allocated
5590 * and freed at this level, since only here do we know when all of
5591 * the log recovery has been completed.
5592 */
5593 STATIC int
5594 xlog_do_log_recovery(
5595 struct xlog *log,
5596 xfs_daddr_t head_blk,
5597 xfs_daddr_t tail_blk)
5598 {
5599 int error, i;
5600
5601 ASSERT(head_blk != tail_blk);
5602
5603 /*
5604 * First do a pass to find all of the cancelled buf log items.
5605 * Store them in the buf_cancel_table for use in the second pass.
5606 */
5607 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5608 sizeof(struct list_head),
5609 KM_SLEEP);
5610 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5611 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5612
5613 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5614 XLOG_RECOVER_PASS1, NULL);
5615 if (error != 0) {
5616 kmem_free(log->l_buf_cancel_table);
5617 log->l_buf_cancel_table = NULL;
5618 return error;
5619 }
5620 /*
5621 * Then do a second pass to actually recover the items in the log.
5622 * When it is complete free the table of buf cancel items.
5623 */
5624 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5625 XLOG_RECOVER_PASS2, NULL);
5626 #ifdef DEBUG
5627 if (!error) {
5630 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5631 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5632 }
5633 #endif /* DEBUG */
5634
5635 kmem_free(log->l_buf_cancel_table);
5636 log->l_buf_cancel_table = NULL;
5637
5638 return error;
5639 }
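/*
 * A minimal sketch of the cancel-table bookkeeping that the two
 * passes rely on, using hypothetical helper names (the real logic,
 * including reference counting of repeated cancel records, lives in
 * the buf log item recovery routines earlier in this file):
 */
#if 0
/* Pass 1: remember the location of a cancelled buffer. */
static void
example_note_cancelled(
	struct list_head	*table,	/* XLOG_BC_TABLE_SIZE buckets */
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	bcp = kmem_alloc(sizeof(*bcp), KM_SLEEP);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	list_add(&bcp->bc_list, &table[blkno % XLOG_BC_TABLE_SIZE]);
}

/* Pass 2: should this buffer be skipped rather than replayed? */
static bool
example_is_cancelled(
	struct list_head	*table,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	list_for_each_entry(bcp, &table[blkno % XLOG_BC_TABLE_SIZE],
			bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return true;
	}
	return false;
}
#endif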
5640
5641 /*
5642 * Do the actual recovery
5643 */
5644 STATIC int
5645 xlog_do_recover(
5646 struct xlog *log,
5647 xfs_daddr_t head_blk,
5648 xfs_daddr_t tail_blk)
5649 {
5650 struct xfs_mount *mp = log->l_mp;
5651 int error;
5652 xfs_buf_t *bp;
5653 xfs_sb_t *sbp;
5654
5655 trace_xfs_log_recover(log, head_blk, tail_blk);
5656
5657 /*
5658 * First replay the images in the log.
5659 */
5660 error = xlog_do_log_recovery(log, head_blk, tail_blk);
5661 if (error)
5662 return error;
5663
5664 /*
5665 * If I/O errors happened during recovery, bail out.
5666 */
5667 if (XFS_FORCED_SHUTDOWN(mp))
5668 return -EIO;
5670
5671 /*
5672 * We now update the tail_lsn since much of the recovery has completed
5673 * and there may be space available to use. If there were no extent
5674 * frees or iunlinks, we can free up the entire log and set the tail_lsn
5675 * be the last_sync_lsn. This was set in xlog_find_tail to be the
5676 * lsn of the last known good LR on disk. If there are extent frees
5677 * or iunlinks they will have some entries in the AIL; so we look at
5678 * the AIL to determine how to set the tail_lsn.
5679 */
5680 xlog_assign_tail_lsn(mp);
5681
5682 /*
5683 * Now that we've finished replaying all buffer and inode
5684 * updates, re-read in the superblock and reverify it.
5685 */
5686 bp = xfs_getsb(mp, 0);
5687 bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5688 ASSERT(!(bp->b_flags & XBF_WRITE));
5689 bp->b_flags |= XBF_READ;
5690 bp->b_ops = &xfs_sb_buf_ops;
5691
5692 error = xfs_buf_submit_wait(bp);
5693 if (error) {
5694 if (!XFS_FORCED_SHUTDOWN(mp)) {
5695 xfs_buf_ioerror_alert(bp, __func__);
5696 ASSERT(0);
5697 }
5698 xfs_buf_relse(bp);
5699 return error;
5700 }
5701
5702 /* Convert superblock from on-disk format */
5703 sbp = &mp->m_sb;
5704 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5705 xfs_buf_relse(bp);
5706
5707 /* re-initialize in-core superblock and geometry structures */
5708 xfs_reinit_percpu_counters(mp);
5709 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5710 if (error) {
5711 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5712 return error;
5713 }
5714 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5715
5716 xlog_recover_check_summary(log);
5717
5718 /* Normal transactions can now occur */
5719 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5720 return 0;
5721 }
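/*
 * xlog_assign_tail_lsn() above picks the new tail. In sketch form
 * (hypothetical helper; the real code also handles locking and the
 * log-space grant heads):
 */
#if 0
static xfs_lsn_t
example_pick_tail_lsn(
	xfs_lsn_t	ail_min_lsn,	/* 0 if the AIL is empty */
	xfs_lsn_t	last_sync_lsn)
{
	/* Nothing left in the AIL? The whole log is free space. */
	return ail_min_lsn ? ail_min_lsn : last_sync_lsn;
}
#endif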
5722
5723 /*
5724 * Perform recovery and re-initialize some log variables in xlog_find_tail.
5725 *
5726 * Return error or zero.
5727 */
5728 int
5729 xlog_recover(
5730 struct xlog *log)
5731 {
5732 xfs_daddr_t head_blk, tail_blk;
5733 int error;
5734
5735 /* find the tail of the log */
5736 error = xlog_find_tail(log, &head_blk, &tail_blk);
5737 if (error)
5738 return error;
5739
5740 /*
5741 * The superblock was read before the log was available and thus the LSN
5742 * could not be verified. Check the superblock LSN against the current
5743 * LSN now that it's known.
5744 */
5745 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5746 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5747 return -EINVAL;
5748
5749 if (tail_blk != head_blk) {
5750 /* There used to be a comment here:
5751 *
5752 * disallow recovery on read-only mounts. note -- mount
5753 * checks for ENOSPC and turns it into an intelligent
5754 * error message.
5755 * ...but this is no longer true. Now, unless you specify
5756 * NORECOVERY (in which case this function would never be
5757 * called), we just go ahead and recover. We do this all
5758 * under the vfs layer, so we can get away with it unless
5759 * the device itself is read-only, in which case we fail.
5760 */
5761 error = xfs_dev_is_read_only(log->l_mp, "recovery");
5762 if (error)
5763 return error;
5764
5765 /*
5766 * Version 5 superblock log feature mask validation. We know the
5767 * log is dirty so check if there are any unknown log features
5768 * in what we need to recover. If there are unknown features
5769 * (e.g. unsupported transactions), then simply reject the
5770 * attempt at recovery before touching anything.
5771 */
5772 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5773 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5774 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5775 xfs_warn(log->l_mp,
5776 "Superblock has unknown incompatible log features (0x%x) enabled.",
5777 (log->l_mp->m_sb.sb_features_log_incompat &
5778 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5779 xfs_warn(log->l_mp,
5780 "The log can not be fully and/or safely recovered by this kernel.");
5781 xfs_warn(log->l_mp,
5782 "Please recover the log on a kernel that supports the unknown features.");
5783 return -EINVAL;
5784 }
5785
5786 /*
5787 * Delay log recovery if the debug hook is set. This is debug
5788 * instrumentation to coordinate simulation of I/O failures with
5789 * log recovery.
5790 */
5791 if (xfs_globals.log_recovery_delay) {
5792 xfs_notice(log->l_mp,
5793 "Delaying log recovery for %d seconds.",
5794 xfs_globals.log_recovery_delay);
5795 msleep(xfs_globals.log_recovery_delay * 1000);
5796 }
5797
5798 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5799 log->l_mp->m_logname ? log->l_mp->m_logname
5800 : "internal");
5801
5802 error = xlog_do_recover(log, head_blk, tail_blk);
5803 log->l_flags |= XLOG_RECOVERY_NEEDED;
5804 }
5805 return error;
5806 }
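/*
 * The incompat log feature gate above is a mask test: any feature bit
 * set in the superblock that this kernel does not recognize makes the
 * log unsafe to replay. A minimal sketch with a hypothetical
 * known_mask parameter:
 */
#if 0
static bool
example_has_unknown_log_features(
	uint32_t	log_incompat,	/* sb_features_log_incompat */
	uint32_t	known_mask)	/* feature bits this kernel knows */
{
	return (log_incompat & ~known_mask) != 0;
}
#endif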
5807
5808 /*
5809 * In the first part of recovery we replay inodes and buffers and build
5810 * up the list of extent free items which need to be processed. Here
5811 * we process the extent free items and clean up the on disk unlinked
5812 * inode lists. This is separated from the first part of recovery so
5813 * that the root and real-time bitmap inodes can be read in from disk in
5814 * between the two stages. This is necessary so that we can free space
5815 * in the real-time portion of the file system.
5816 */
5817 int
5818 xlog_recover_finish(
5819 struct xlog *log)
5820 {
5821 /*
5822 * Now we're ready to do the transactions needed for the
5823 * rest of recovery. Start with completing all the extent
5824 * free intent records and then process the unlinked inode
5825 * lists. At this point, we essentially run in normal mode
5826 * except that we're still performing recovery actions
5827 * rather than accepting new requests.
5828 */
5829 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5830 int error;
5831 error = xlog_recover_process_intents(log);
5832 if (error) {
5833 xfs_alert(log->l_mp, "Failed to recover intents");
5834 return error;
5835 }
5836
5837 /*
5838 * Sync the log to get all the intents out of the AIL.
5839 * This isn't absolutely necessary, but it helps in
5840 * case the unlink transactions have trouble pushing
5841 * the intents out of the way.
5842 */
5843 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5844
5845 xlog_recover_process_iunlinks(log);
5846
5847 xlog_recover_check_summary(log);
5848
5849 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5850 log->l_mp->m_logname ? log->l_mp->m_logname
5851 : "internal");
5852 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5853 } else {
5854 xfs_info(log->l_mp, "Ending clean mount");
5855 }
5856 return 0;
5857 }
5858
5859 int
5860 xlog_recover_cancel(
5861 struct xlog *log)
5862 {
5863 int error = 0;
5864
5865 if (log->l_flags & XLOG_RECOVERY_NEEDED)
5866 error = xlog_recover_cancel_intents(log);
5867
5868 return error;
5869 }
5870
5871 #if defined(DEBUG)
5872 /*
5873 * Read all of the agf and agi counters and check that they
5874 * are consistent with the superblock counters.
5875 */
5876 STATIC void
5877 xlog_recover_check_summary(
5878 struct xlog *log)
5879 {
5880 xfs_mount_t *mp;
5881 xfs_agf_t *agfp;
5882 xfs_buf_t *agfbp;
5883 xfs_buf_t *agibp;
5884 xfs_agnumber_t agno;
5885 uint64_t freeblks;
5886 uint64_t itotal;
5887 uint64_t ifree;
5888 int error;
5889
5890 mp = log->l_mp;
5891
5892 freeblks = 0LL;
5893 itotal = 0LL;
5894 ifree = 0LL;
5895 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5896 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5897 if (error) {
5898 xfs_alert(mp, "%s agf read failed agno %d error %d",
5899 __func__, agno, error);
5900 } else {
5901 agfp = XFS_BUF_TO_AGF(agfbp);
5902 freeblks += be32_to_cpu(agfp->agf_freeblks) +
5903 be32_to_cpu(agfp->agf_flcount);
5904 xfs_buf_relse(agfbp);
5905 }
5906
5907 error = xfs_read_agi(mp, NULL, agno, &agibp);
5908 if (error) {
5909 xfs_alert(mp, "%s agi read failed agno %d error %d",
5910 __func__, agno, error);
5911 } else {
5912 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
5913
5914 itotal += be32_to_cpu(agi->agi_count);
5915 ifree += be32_to_cpu(agi->agi_freecount);
5916 xfs_buf_relse(agibp);
5917 }
5918 }
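/*
 * A sketch of the comparison the comment above describes; this
 * function currently only accumulates the totals, and exact free
 * space accounting (e.g. AG btree blocks under lazy superblock
 * counters) is glossed over here:
 *
 *	ASSERT(itotal == mp->m_sb.sb_icount);
 *	ASSERT(ifree == mp->m_sb.sb_ifree);
 *	ASSERT(freeblks == mp->m_sb.sb_fdblocks);
 */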
5919 }
5920 #endif /* DEBUG */