/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
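/*
 * Example: BLK_AVG(8, 13) == (8 + 13) >> 1 == 10, i.e. the integer
 * midpoint rounded down. The binary search in xlog_find_cycle_start()
 * below relies on this midpoint landing strictly between its endpoints
 * until they become adjacent.
 */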
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);

#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};
/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */
/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}
	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
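/*
 * Worked example of the sizing above: with l_sectBBsize == 8 basic
 * blocks, a request for nbblks == 3 is first padded to 11 to allow for
 * a non-sector-aligned starting block, then rounded up to 16, i.e. two
 * complete log sectors.
 */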
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}
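/*
 * Example: l_sectBBsize is a power of two, so the mask above computes
 * blk_no % l_sectBBsize. With l_sectBBsize == 8 and blk_no == 13, the
 * block's data starts BBTOB(5) == 5 * 512 bytes into the buffer.
 */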
/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}
STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return error;
}
/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}

	/*
	 * On v5 supers, a bli could be attached to update the metadata LSN.
	 * Clean it up.
	 */
	if (bp->b_fspriv)
		xfs_buf_item_relse(bp);
	ASSERT(bp->b_fspriv == NULL);

	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
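/*
 * Worked example: with block 0 holding cycle 6 and the last block of a
 * 100-block region holding cycle 7, each probe either pulls end_blk
 * down (probe reads cycle 7) or pushes first_blk up (probe reads
 * cycle 6); the loop ends with the two adjacent and *last_blk set to
 * the approximate first cycle-7 block, which the callers then verify.
 */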
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
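/*
 * Example of the length check above: a v2 record with h_size == 64k
 * uses xhdrs == 2 header blocks, so a record with h_len == 64k of data
 * should span exactly 2 + BTOBB(65536) == 130 blocks. If the distance
 * from the header at block i to *last_blk (plus extra_bblks) differs,
 * *last_blk points mid-record and is pulled back to the header.
 */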
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;
	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}
	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);
	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}
	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum - start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
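/*
 * Concrete scenario for the search above: in a 1000-block log where
 * block 0 reads cycle 9 and block 999 reads cycle 8, the layout is
 * 9 ... | 8 ... | 8. The head belongs at the first cycle-8 block; the
 * binary search approximates it and the verification scans walk back
 * across any stale cycle-9 blocks written out of order before a crash.
 */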
/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
/*
 * Check the log tail for torn writes. This is required when torn writes are
 * detected at the head and the head had to be walked back to a previous record.
 * The tail of the previous record must now be verified to ensure the torn
 * writes didn't corrupt the previous tail.
 *
 * Return an error if CRC verification fails as recovery cannot proceed.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			count;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_head;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
	 * a temporary head block that points after the last possible
	 * concurrently written record of the tail.
	 */
	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
				     &wrapped);
	if (count < 0) {
		error = count;
		goto out;
	}

	/*
	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
	 * into the actual log head. tmp_head points to the start of the record
	 * so update it to the actual head block.
	 */
	if (count < XLOG_MAX_ICLOGS + 1)
		tmp_head = head_blk;

	/*
	 * We now have a tail and temporary head block that covers at least
	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
	 * records were completely written. Run a CRC verification pass from
	 * tail to head and return the result.
	 */
	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);

out:
	xlog_put_bp(bp);
	return error;
}
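/*
 * Why XLOG_MAX_ICLOGS + 1 records: at most XLOG_MAX_ICLOGS in-core log
 * buffers can be in flight at once, so a torn write can only land
 * within that many records of the tail. Verifying one record past the
 * window proves the tail itself was not overwritten by a torn write.
 */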
/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if (error == -EFSBADCRC) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}

		/*
		 * Now verify the tail based on the updated head. This is
		 * required because the torn writes trimmed from the head could
		 * have been written over the tail of a previous record. Return
		 * any errors since recovery cannot proceed if the tail is
		 * corrupt.
		 *
		 * XXX: This leaves a gap in truly robust protection from torn
		 * writes in the log. If the head is behind the tail, the tail
		 * pushes forward to create some space and then a crash occurs
		 * causing the writes into the previous record's tail region to
		 * tear, log recovery isn't able to recover.
		 *
		 * How likely is this to occur? If possible, can we do something
		 * more intelligent here? Is it safe to push the tail forward if
		 * we can determine that the tail is within the range of the
		 * torn write (e.g., the kernel can only overwrite the tail if
		 * it has actually been pushed forward)? Alternatively, could we
		 * somehow prevent this condition at runtime?
		 */
		error = xlog_verify_tail(log, *head_blk, *tail_blk);
	}

	return error;
}
/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
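/*
 * Example of the block math above: for a v2 record with h_size == 64k,
 * hblks == 2, so with the record header at rhead_blk the unmount data
 * sits at rhead_blk + 2 and, with h_len == 512, the block after the
 * record is rhead_blk + 2 + BTOBB(512) == rhead_blk + 3; both values
 * wrap via do_mod() because the physical log is circular.
 */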
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}
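/*
 * Illustration: both grant heads are primed to the byte offset of the
 * current head, e.g. l_curr_block == 100 gives BBTOB(100) == 51200
 * bytes into l_curr_cycle, so post-recovery reservations account for
 * space starting exactly where the next log write will land.
 */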
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		return error;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		return -EIO;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, bp,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, bp,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * in it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
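/*
 * Each record stamped out above is a bare header in a single 512-byte
 * basic block: zero log operations, h_lsn encoding (cycle, block) and
 * h_tail_lsn encoding the preserved tail, so a later head search sees
 * a plausible complete record instead of torn pre-crash writes.
 */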
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
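/*
 * Illustration of the alignment handling above: if start_block is not
 * sector aligned (say block 10 with 8-BB sectors), the sector holding
 * it is read back first so the blocks ahead of start_block survive the
 * full-sector write; j counts those preserved leading blocks. The read
 * at ealign protects the tail of the final sector in the same way.
 */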
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
		case XFS_LI_RUI:
		case XFS_LI_RUD:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EIO;
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
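/*
 * Example: a buffer at blf_blkno 1024 cancelled twice in the log ends
 * up as one table entry with bc_refcount == 2. Pass 2 decrements the
 * count at each matching cancel item and removes the entry at the last
 * one, so later reuse of those blocks is replayed normally.
 */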
/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it is, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}
/*
 * If the buffer is being cancelled then return 1 so that it will be cancelled,
 * otherwise return 0.  If the buffer is actually a buffer cancel item
 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
 * table and remove it from the table if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its last
 * occurrence in the log so that if the same buffer is re-used again after its
 * last cancellation we actually replay the changes made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
	if (!bcp)
		return 0;

	/*
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	 */
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
			kmem_free(bcp);
		}
	}
	return 1;
}
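/*
 * Usage sketch (hypothetical values): a query for blkno 64, len 8 with
 * XFS_BLF_CANCEL set both reports the buffer as cancelled and consumes
 * one table reference; the same query without the flag leaves the
 * refcount untouched, so lookups during replay don't perturb the
 * pass 2 accounting.
 */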
/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
	}

	return 0;
}
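/*
 * Example of the offset math above: with 512-byte inodes, inode 3 in
 * the buffer keeps its di_next_unlinked at 3 * 512 + offsetof(
 * xfs_dinode_t, di_next_unlinked). Only logged regions - tracked in
 * blf_data_map at XFS_BLF_CHUNK (128 byte) granularity - that cover
 * that offset contribute a replayed value.
 */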
/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer contents
 * temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number.  If we don't recognise the magic number in the buffer, then
 * return a LSN of -1 so that the caller knows it was an unrecognised block and
 * so can recover the buffer.
 *
 * Note: we cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's LSN and compare it to that
 * which we read from the superblock. If the UUIDs don't match, then we've got a
 * stale metadata block from an old filesystem instance that we need to recover
 * over the top of.
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;

	/* v4 filesystems always recover immediately */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_RMAP_CRC_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		/*
		 * Remote attr blocks are written synchronously, rather than
		 * being logged. That means they do not contain a valid LSN
		 * (i.e. transactionally ordered) in them, and hence any time we
		 * see a buffer to replay over the top of a remote attribute
		 * block we should simply do so.
		 */
		goto recover_immediately;
	case XFS_SB_MAGIC:
		/*
		 * superblock uuids are magic. We may or may not have a
		 * sb_meta_uuid on disk, but it will be set in the in-core
		 * superblock. We set the uuid pointer for verification
		 * according to the superblock feature mask to ensure we check
		 * the relevant UUID in the superblock.
		 */
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
		else
			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes have
	 * been modified, and there is no global buffer LSN. Hence we need to
	 * recover all inode buffer types immediately. This problem will be
	 * fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}
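/*
 * Illustrative sketch (not part of the original code): how a caller consumes
 * the LSN returned by xlog_recover_get_buf_lsn(). A return of -1 means the
 * block was unrecognised (or from a stale filesystem) and must be recovered
 * immediately; otherwise replay is skipped when the on-disk object already
 * sits at or beyond the LSN of the transaction being replayed. This mirrors
 * the check in xlog_recover_buffer_pass2() below.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		/* on-disk copy is newer than the log change: skip replay */
	} else {
		/* replay the logged change over the buffer contents */
	}
#endif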
/*
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to it for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f,
	xfs_lsn_t		current_lsn)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;
	char			*warnmsg = NULL;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be able
	 * to determine if we should have replayed the item. If we replay old
	 * metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_FIBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		case XFS_RMAP_CRC_MAGIC:
			bp->b_ops = &xfs_rmapbt_buf_ops;
			break;
		default:
			warnmsg = "Bad btree block magic!";
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			warnmsg = "Bad AGF block magic!";
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			warnmsg = "Bad AGFL block magic!";
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			warnmsg = "Bad AGI block magic!";
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			warnmsg = "Bad DQUOT block magic!";
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			warnmsg = "Bad INODE block magic!";
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			warnmsg = "Bad symlink block magic!";
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			warnmsg = "Bad dir block magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			warnmsg = "Bad dir data magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			warnmsg = "Bad dir3 free magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			warnmsg = "Bad dir leaf1 magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			warnmsg = "Bad dir leafn magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			warnmsg = "Bad da node magic!";
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			warnmsg = "Bad attr leaf magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			warnmsg = "Bad attr remote magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			warnmsg = "Bad SB block magic!";
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
#ifdef CONFIG_XFS_RT
	case XFS_BLFT_RTBITMAP_BUF:
	case XFS_BLFT_RTSUMMARY_BUF:
		/* no magic numbers for verification of RT buffers */
		bp->b_ops = &xfs_rtbuf_ops;
		break;
#endif /* CONFIG_XFS_RT */
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}

	/*
	 * Nothing else to do in the case of a NULL current LSN as this means
	 * the buffer is more recent than the change in the log and will be
	 * skipped.
	 */
	if (current_lsn == NULLCOMMITLSN)
		return;

	if (warnmsg) {
		xfs_warn(mp, warnmsg);
		ASSERT(0);
	}

	/*
	 * We must update the metadata LSN of the buffer as it is written out to
	 * ensure that older transactions never replay over this one and corrupt
	 * the buffer. This can occur if log recovery is interrupted at some
	 * point after the current transaction completes, at which point a
	 * subsequent mount starts recovery from the beginning.
	 *
	 * Write verifiers update the metadata LSN from log items attached to
	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
	 * the verifier. We'll clean it up in our ->iodone() callback.
	 */
	if (bp->b_ops) {
		struct xfs_buf_log_item	*bip;

		ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_item_init(bp, mp);
		bip = bp->b_fspriv;
		bip->bli_item.li_lsn = current_lsn;
	}
}
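/*
 * Illustrative sketch (not part of the original code): the decision order at
 * the tail of xlog_recover_validate_buf_type(). A magic mismatch is only
 * warned about when we actually replayed over the buffer (current_lsn is a
 * real LSN); a NULLCOMMITLSN means the buffer was newer and left untouched,
 * so a stale magic is expected and silently tolerated.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	if (current_lsn == NULLCOMMITLSN)
		return;			/* buffer not modified: nothing to do */
	if (warnmsg) {
		xfs_warn(mp, warnmsg);	/* replayed over unexpected contents */
		ASSERT(0);
	}
#endif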
/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f,
	xfs_lsn_t		current_lsn)
{
	int			i;
	int			bit;
	int			nbits;
	int			error;

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing into
		 * the log. Hence we need to trim nbits back to the length of
		 * the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
			if (error)
				goto next;
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}
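/*
 * Illustrative sketch (not part of the original code): the dirty-region
 * bitmap arithmetic used above. Each bit of blf_data_map covers one
 * XFS_BLF_CHUNK (128 byte) chunk of the buffer, so a contiguous run of nbits
 * bits starting at 'bit' maps to the byte range
 * [bit << XFS_BLF_SHIFT, (bit + nbits) << XFS_BLF_SHIFT).
 */
#if 0	/* standalone sketch, never compiled into the kernel */
#include <stdio.h>

#define DEMO_BLF_SHIFT	7	/* log2(128), mirrors XFS_BLF_SHIFT */

int main(void)
{
	int bit = 2, nbits = 3;

	/* bit 2, 3 bits long: bytes 256..639 of the buffer are dirty */
	printf("offset %d, length %d\n",
	       bit << DEMO_BLF_SHIFT, nbits << DEMO_BLF_SHIFT);
	return 0;
}
#endif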
/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer to
 * indicate to the caller if the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;
	/*
	 * This type of quotas was turned off, so ignore this buffer
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
	return true;
}
/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently.  For inode buffers, we only recover a
 * specific set of data from them, namely the inode di_next_unlinked
 * fields.  This is because all other inode data is actually logged
 * via inode records and any data we replay here which overlaps that
 * may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery.  During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table.  See xlog_recover_buffer_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
STATIC int
xlog_recover_buffer_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	int			error;
	uint			buf_flags;
	xfs_lsn_t		lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		trace_xfs_log_recover_buf_cancel(log, buf_f);
		return 0;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, NULL);
	if (!bp)
		return -ENOMEM;
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
		goto out_release;
	}

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN we found in
	 * the buffer is more recent than the current transaction, then we need
	 * to attach the verifier directly. Failure to do so can lead to future
	 * recovery actions (e.g. EFI and unlinked list recovery) operating on
	 * the buffers without the verifier attached, which can leave blocks on
	 * disk with the correct content but a stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recover turns that
	 * buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		trace_xfs_log_recover_buf_skip(log, buf_f);
		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
	}

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size.  Regardless, if the
	 * inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_target->bt_mount == mp);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
}
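/*
 * Illustrative sketch (not part of the original code): a radically simplified
 * shape of the two-pass cancellation scheme described above. All demo_* names
 * are hypothetical. Pass 1 records each XFS_BLF_CANCEL buffer keyed by block
 * number; pass 2 consults the table before replaying a buffer.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
#include <stdbool.h>

struct demo_cancel {
	long long	blkno;
	unsigned	len;
	int		refcount;
};

static struct demo_cancel demo_table[16];
static int demo_entries;

/* pass 1: a log item carrying XFS_BLF_CANCEL is recorded, not replayed */
static void demo_note_cancel(long long blkno, unsigned len)
{
	for (int i = 0; i < demo_entries; i++) {
		if (demo_table[i].blkno == blkno && demo_table[i].len == len) {
			demo_table[i].refcount++;
			return;
		}
	}
	demo_table[demo_entries].blkno = blkno;
	demo_table[demo_entries].len = len;
	demo_table[demo_entries].refcount = 1;
	demo_entries++;
}

/* pass 2: skip replay of any buffer with a matching cancel record */
static bool demo_cancelled(long long blkno, unsigned len)
{
	for (int i = 0; i < demo_entries; i++)
		if (demo_table[i].blkno == blkno && demo_table[i].len == len)
			return true;
	return false;
}
#endif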
/*
 * Inode fork owner changes
 *
 * If we have been told that we have to reparent the inode fork, it's because an
 * extent swap operation on a CRC enabled filesystem has been done and we are
 * replaying it. We need to walk the BMBT of the appropriate fork and change the
 * owners of the btree blocks.
 *
 * The complexity here is that we don't have an inode context to work with, so
 * after we've replayed the inode we need to instantiate one. This is where the
 * fun begins.
 *
 * We are in the middle of log recovery, so we can't run transactions. That
 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
 * that will result in the corresponding iput() running the inode through
 * xfs_inactive(). If we've just replayed an inode core that changes the link
 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
 * transactions (bad!).
 *
 * So, to avoid this, we instantiate an inode directly from the inode core we've
 * just recovered. We have the buffer still locked, and all we really need to
 * instantiate is the inode core and the forks being modified. We can do this
 * manually, then run the inode btree owner change, and then tear down the
 * xfs_inode without having to run any transactions at all.
 *
 * Also, we don't have a transaction context available here, but we do need to
 * gather all the buffers we modify for writeback, so we pass the buffer_list
 * to the operation for it to use instead.
 */
STATIC int
xfs_recover_inode_owner_change(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_format *in_f,
	struct list_head	*buffer_list)
{
	struct xfs_inode	*ip;
	int			error;

	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));

	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
	if (!ip)
		return -ENOMEM;

	/* instantiate the inode */
	xfs_inode_from_disk(ip, dip);
	ASSERT(ip->i_d.di_version >= 3);

	error = xfs_iformat_fork(ip, dip);
	if (error)
		goto out_free_ip;

	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

out_free_ip:
	xfs_inode_free(ip);
	return error;
}
STATIC int
xlog_recover_inode_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_inode_log_format_t	*in_f;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	xfs_dinode_t		*dip;
	int			len;
	char			*src;
	char			*dest;
	int			error;
	int			attr_index;
	uint			fields;
	struct xfs_log_dinode	*ldip;
	uint			isize;
	int			need_free = 0;

	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
		in_f = item->ri_buf[0].i_addr;
	} else {
		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
		need_free = 1;
		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
		if (error)
			goto error;
	}

	/*
	 * Inode buffers can be freed, look out for it,
	 * and do not replay the inode.
	 */
	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
					in_f->ilf_len, 0)) {
		error = 0;
		trace_xfs_log_recover_inode_cancel(log, in_f);
		goto error;
	}
	trace_xfs_log_recover_inode_recover(log, in_f);

	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
			  &xfs_inode_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto error;
	}
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
		goto out_release;
	}
	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
	dip = xfs_buf_offset(bp, in_f->ilf_boffset);

	/*
	 * Make sure the place we're flushing out to really looks
	 * like an inode!
	 */
	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
		xfs_alert(mp,
	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
			__func__, dip, bp, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
				 XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	ldip = item->ri_buf[1].i_addr;
	if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
		xfs_alert(mp,
			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
			__func__, item, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
				 XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/*
	 * If the inode has an LSN in it, recover the inode only if it's less
	 * than the lsn of the transaction we are replaying. Note: we still
	 * need to replay an owner change even though the inode is more recent
	 * than the transaction as there is no guarantee that all the btree
	 * blocks are more recent than this transaction, too.
	 */
	if (dip->di_version >= 3) {
		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_owner_change;
		}
	}
	/*
	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
	 * are transactional and if ordering is necessary we can determine that
	 * more accurately by the LSN field in the V3 inode core. Don't trust
	 * the inode versions we might be changing them here - use the
	 * superblock flag to determine whether we need to look at di_flushiter
	 * to skip replay when the on disk inode is newer than the log one.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less
		 * than smaller numbers
		 */
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* do nothing */
		} else {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_release;
		}
	}

	/* Take the opportunity to reset the flush iteration count */
	ldip->di_flushiter = 0;
	if (unlikely(S_ISREG(ldip->di_mode))) {
		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
					     XFS_ERRLEVEL_LOW, mp, ldip);
			xfs_alert(mp,
		"%s: Bad regular inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	} else if (unlikely(S_ISDIR(ldip->di_mode))) {
		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
		    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
					     XFS_ERRLEVEL_LOW, mp, ldip);
			xfs_alert(mp,
		"%s: Bad dir inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	}
	if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
				     XFS_ERRLEVEL_LOW, mp, ldip);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
			__func__, item, dip, bp, in_f->ilf_ino,
			ldip->di_nextents + ldip->di_anextents,
			ldip->di_nblocks);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
				     XFS_ERRLEVEL_LOW, mp, ldip);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
			item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	isize = xfs_log_dinode_size(ldip->di_version);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				     XFS_ERRLEVEL_LOW, mp, ldip);
		xfs_alert(mp,
			"%s: Bad inode log record length %d, rec ptr 0x%p",
			__func__, item->ri_buf[1].i_len, item);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	/* recover the log dinode inode into the on disk inode */
	xfs_log_dinode_to_disk(ldip, dip);

	/* the rest is in on-disk format */
	if (item->ri_buf[1].i_len > isize) {
		memcpy((char *)dip + isize,
			item->ri_buf[1].i_addr + isize,
			item->ri_buf[1].i_len - isize);
	}

	fields = in_f->ilf_fields;
	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
	case XFS_ILOG_DEV:
		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
		break;
	case XFS_ILOG_UUID:
		memcpy(XFS_DFORK_DPTR(dip),
		       &in_f->ilf_u.ilfu_uuid,
		       sizeof(uuid_t));
		break;
	}

	if (in_f->ilf_size == 2)
		goto out_owner_change;
	len = item->ri_buf[2].i_len;
	src = item->ri_buf[2].i_addr;
	ASSERT(in_f->ilf_size <= 4);
	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
	ASSERT(!(fields & XFS_ILOG_DFORK) ||
	       (len == in_f->ilf_dsize));

	switch (fields & XFS_ILOG_DFORK) {
	case XFS_ILOG_DDATA:
	case XFS_ILOG_DEXT:
		memcpy(XFS_DFORK_DPTR(dip), src, len);
		break;

	case XFS_ILOG_DBROOT:
		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
				 XFS_DFORK_DSIZE(dip, mp));
		break;

	default:
		/*
		 * There are no data fork flags set.
		 */
		ASSERT((fields & XFS_ILOG_DFORK) == 0);
		break;
	}

	/*
	 * If we logged any attribute data, recover it.  There may or
	 * may not have been any other non-core data logged in this
	 * transaction.
	 */
	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
			attr_index = 3;
		} else {
			attr_index = 2;
		}
		len = item->ri_buf[attr_index].i_len;
		src = item->ri_buf[attr_index].i_addr;
		ASSERT(len == in_f->ilf_asize);

		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
		case XFS_ILOG_ADATA:
		case XFS_ILOG_AEXT:
			dest = XFS_DFORK_APTR(dip);
			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
			memcpy(dest, src, len);
			break;

		case XFS_ILOG_ABROOT:
			dest = XFS_DFORK_APTR(dip);
			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
					 len, (xfs_bmdr_block_t *)dest,
					 XFS_DFORK_ASIZE(dip, mp));
			break;

		default:
			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
			ASSERT(0);
			error = -EIO;
			goto out_release;
		}
	}

out_owner_change:
	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
		error = xfs_recover_inode_owner_change(mp, dip, in_f,
						       buffer_list);
	/* re-generate the checksum. */
	xfs_dinode_calc_crc(log->l_mp, dip);

	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
error:
	if (need_free)
		kmem_free(in_f);
	return error;
}
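/*
 * Illustrative sketch (not part of the original code): the di_flushiter wrap
 * handling replayed in xlog_recover_inode_pass2() above. The 16 bit flush
 * counter wraps at DI_MAX_FLUSH (0xffff), so an on-disk value at the maximum
 * paired with a small log value means the counter wrapped and the log copy is
 * actually the newer one.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* counter wrapped: the log copy is newer, replay it */
		} else {
			/* on-disk inode genuinely newer: skip the replay */
		}
	}
#endif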
/*
 * Recover QUOTAOFF records. We simply make a note of it in the xlog
 * structure, so that we know not to do any dquot item or dquot buffer recovery
 * of that type.
 */
STATIC int
xlog_recover_quotaoff_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
	ASSERT(qoff_f);

	/*
	 * The logitem format's flag tells us if this was user quotaoff,
	 * group/project quotaoff or both.
	 */
	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_USER;
	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_GROUP;

	return 0;
}
/*
 * Recover a dquot record
 */
STATIC int
xlog_recover_dquot_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	struct xfs_disk_dquot	*ddq, *recddq;
	int			error;
	xfs_dq_logformat_t	*dq_f;
	uint			type;

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0)
		return 0;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL) {
		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
		return -EIO;
	}
	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
			item->ri_buf[1].i_len, __func__);
		return -EIO;
	}

	/*
	 * This type of quotas was turned off, so ignore this record.
	 */
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return 0;

	/*
	 * At this point we know that quota was _not_ turned off.
	 * Since the mount flags are not indicating to us otherwise, this
	 * must mean that quota is on, and the dquot needs to be replayed.
	 * Remember that we may not have fully recovered the superblock yet,
	 * so we can't do the usual trick of looking at the SB quota bits.
	 *
	 * The other possibility, of course, is that the quota subsystem was
	 * removed since the last mount - ENOSYS.
	 */
	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2 (log copy)");
	if (error)
		return -EIO;
	ASSERT(dq_f->qlf_len == 1);

	/*
	 * At this point we are assuming that the dquots have been allocated
	 * and hence the buffer has valid dquots stamped in it. It should,
	 * therefore, pass verifier validation. If the dquot is bad, then
	 * we'll return an error here, so we don't need to specifically check
	 * the dquot in the buffer after the verifier has run.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		return error;

	ASSERT(bp);
	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);

	/*
	 * If the dquot has an LSN in it, recover the dquot only if it's less
	 * than the lsn of the transaction we are replaying.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			goto out_release;
		}
	}

	memcpy(ddq, recddq, item->ri_buf[1].i_len);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	ASSERT(dq_f->qlf_size == 2);
	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
	return 0;
}
/*
 * This routine is called to create an in-core extent free intent
 * item from the efi format structure which was logged on disk.
 * It allocates an in-core efi, copies the extents from the format
 * structure into it, and adds the efi to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_efi_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_efi_log_item		*efip;
	struct xfs_efi_log_format	*efi_formatp;

	efi_formatp = item->ri_buf[0].i_addr;

	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
	if (error) {
		xfs_efi_item_free(efip);
		return error;
	}
	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);

	spin_lock(&log->l_ailp->xa_lock);
	/*
	 * The EFI has two references. One for the EFD and one for EFI to ensure
	 * it makes it into the AIL. Insert the EFI into the AIL directly and
	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
	 * AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
	xfs_efi_release(efip);
	return 0;
}
/*
 * This routine is called when an EFD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
 * was still in the log. To do this it searches the AIL for the EFI with an id
 * equal to that in the EFD format structure. If we find it we drop the EFD
 * reference, which removes the EFI from the AIL and frees it.
 */
STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	xfs_log_item_t		*lip;
	__uint64_t		efi_id;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;

	efd_formatp = item->ri_buf[0].i_addr;
	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
	efi_id = efd_formatp->efd_efi_id;

	/*
	 * Search for the EFI with the id in the EFD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				/*
				 * Drop the EFD reference to the EFI. This
				 * removes the EFI from the AIL and frees it.
				 */
				spin_unlock(&ailp->xa_lock);
				xfs_efi_release(efip);
				spin_lock(&ailp->xa_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);

	return 0;
}
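/*
 * Illustrative sketch (not part of the original code): the intent/done
 * pairing pattern shared by EFI/EFD above and RUI/RUD below. The done item
 * carries the id of the intent it completes; recovery walks the AIL and
 * releases the matching intent so that it is not replayed.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				xfs_efi_release(efip);	/* drops AIL entry */
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
#endif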
/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	spin_lock(&log->l_ailp->xa_lock);
	/*
	 * The RUI has two references. One for the RUD and one for RUI to ensure
	 * it makes it into the AIL. Insert the RUI into the AIL directly and
	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
	 * AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}
/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_rud_log_format	*rud_formatp;
	struct xfs_rui_log_item		*ruip = NULL;
	struct xfs_log_item		*lip;
	__uint64_t			rui_id;
	struct xfs_ail_cursor		cur;
	struct xfs_ail			*ailp = log->l_ailp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
	rui_id = rud_formatp->rud_rui_id;

	/*
	 * Search for the RUI with the id in the RUD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_RUI) {
			ruip = (struct xfs_rui_log_item *)lip;
			if (ruip->rui_format.rui_id == rui_id) {
				/*
				 * Drop the RUD reference to the RUI. This
				 * removes the RUI from the AIL and frees it.
				 */
				spin_unlock(&ailp->xa_lock);
				xfs_rui_release(ruip);
				spin_lock(&ailp->xa_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);

	return 0;
}
/*
 * This routine is called when an inode create format structure is found in a
 * committed transaction in the log.  Its purpose is to initialise the inodes
 * being allocated on disk.  This requires us to get inode cluster buffers that
 * match the range to be initialised, stamped with inode templates and written
 * by delayed write so that subsequent modifications will hit the cached buffer
 * and only need writing out at the end of recovery.
 */
STATIC int
xlog_recover_do_icreate_pass2(
	struct xlog		*log,
	struct list_head	*buffer_list,
	xlog_recover_item_t	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_icreate_log	*icl;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	unsigned int		count;
	unsigned int		isize;
	xfs_agblock_t		length;
	int			blks_per_cluster;
	int			bb_per_cluster;
	int			cancel_count;
	int			nbufs;
	int			i;

	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
	if (icl->icl_type != XFS_LI_ICREATE) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
		return -EINVAL;
	}

	if (icl->icl_size != 1) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
		return -EINVAL;
	}

	agno = be32_to_cpu(icl->icl_ag);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
		return -EINVAL;
	}
	agbno = be32_to_cpu(icl->icl_agbno);
	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
		return -EINVAL;
	}
	isize = be32_to_cpu(icl->icl_isize);
	if (isize != mp->m_sb.sb_inodesize) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
		return -EINVAL;
	}
	count = be32_to_cpu(icl->icl_count);
	if (!count) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
		return -EINVAL;
	}
	length = be32_to_cpu(icl->icl_length);
	if (!length || length >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
		return -EINVAL;
	}

	/*
	 * The inode chunk is either full or sparse and we only support
	 * m_ialloc_min_blks sized sparse allocations at this time.
	 */
	if (length != mp->m_ialloc_blks &&
	    length != mp->m_ialloc_min_blks) {
		xfs_warn(log->l_mp,
			 "%s: unsupported chunk length", __FUNCTION__);
		return -EINVAL;
	}

	/* verify inode count is consistent with extent length */
	if ((count >> mp->m_sb.sb_inopblog) != length) {
		xfs_warn(log->l_mp,
			 "%s: inconsistent inode count and chunk length",
			 __FUNCTION__);
		return -EINVAL;
	}

	/*
	 * The icreate transaction can cover multiple cluster buffers and these
	 * buffers could have been freed and reused. Check the individual
	 * buffers for cancellation so we don't overwrite anything written after
	 * cancellation.
	 */
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
	nbufs = length / blks_per_cluster;
	for (i = 0, cancel_count = 0; i < nbufs; i++) {
		xfs_daddr_t	daddr;

		daddr = XFS_AGB_TO_DADDR(mp, agno,
					 agbno + i * blks_per_cluster);
		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
			cancel_count++;
	}

	/*
	 * We currently only use icreate for a single allocation at a time. This
	 * means we should expect either all or none of the buffers to be
	 * cancelled. Be conservative and skip replay if at least one buffer is
	 * cancelled, but warn the user that something is awry if the buffers
	 * are not consistent.
	 *
	 * XXX: This must be refined to only skip cancelled clusters once we use
	 * icreate for multiple chunk allocations.
	 */
	ASSERT(!cancel_count || cancel_count == nbufs);
	if (cancel_count) {
		if (cancel_count != nbufs)
			xfs_warn(mp,
	"WARNING: partial inode chunk cancellation, skipped icreate.");
		trace_xfs_log_recover_icreate_cancel(log, icl);
		return 0;
	}

	trace_xfs_log_recover_icreate_recover(log, icl);
	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
				     length, be32_to_cpu(icl->icl_gen));
}
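/*
 * Illustrative numbers (not part of the original code) for the icreate
 * consistency checks above, under assumed geometry: with 8 inodes per block
 * (sb_inopblog == 3), a 64 inode chunk must have length == 64 >> 3 == 8
 * blocks, and with 4 blocks per inode cluster it spans 8 / 4 == 2 cluster
 * buffers, each of which is checked for cancellation individually.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	unsigned int count = 64;		/* inodes in the chunk */
	unsigned int inopblog = 3;		/* log2(inodes per block) */
	unsigned int length = count >> inopblog;	/* == 8 blocks */
	int blks_per_cluster = 4;		/* assumed cluster size */
	int nbufs = length / blks_per_cluster;	/* == 2 cluster buffers */
#endif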
STATIC void
xlog_recover_buffer_ra_pass2(
	struct xlog                     *log,
	struct xlog_recover_item        *item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;

	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		return;
	}

	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
				buf_f->blf_len, NULL);
}
STATIC void
xlog_recover_inode_ra_pass2(
	struct xlog                     *log,
	struct xlog_recover_item        *item)
{
	struct xfs_inode_log_format	ilf_buf;
	struct xfs_inode_log_format	*ilfp;
	struct xfs_mount		*mp = log->l_mp;
	int			error;

	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
		ilfp = item->ri_buf[0].i_addr;
	} else {
		ilfp = &ilf_buf;
		memset(ilfp, 0, sizeof(*ilfp));
		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
		if (error)
			return;
	}

	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
}
STATIC void
xlog_recover_dquot_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_disk_dquot	*recddq;
	struct xfs_dq_logformat	*dq_f;
	uint			type;
	int			len;

	if (mp->m_qflags == 0)
		return;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL)
		return;
	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
		return;

	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return;

	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	ASSERT(dq_f->qlf_len == 1);

	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
			  &xfs_dquot_buf_ra_ops);
}
STATIC void
xlog_recover_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		xlog_recover_buffer_ra_pass2(log, item);
		break;
	case XFS_LI_INODE:
		xlog_recover_inode_ra_pass2(log, item);
		break;
	case XFS_LI_DQUOT:
		xlog_recover_dquot_ra_pass2(log, item);
		break;
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_QUOTAOFF:
	default:
		break;
	}
}
STATIC int
xlog_recover_commit_pass1(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass1(log, item);
	case XFS_LI_QUOTAOFF:
		return xlog_recover_quotaoff_pass1(log, item);
	case XFS_LI_INODE:
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_DQUOT:
	case XFS_LI_ICREATE:
	case XFS_LI_RUI:
	case XFS_LI_RUD:
		/* nothing to do in pass 1 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EIO;
	}
}
STATIC int
xlog_recover_commit_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_INODE:
		return xlog_recover_inode_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_EFI:
		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
	case XFS_LI_EFD:
		return xlog_recover_efd_pass2(log, item);
	case XFS_LI_RUI:
		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
	case XFS_LI_RUD:
		return xlog_recover_rud_pass2(log, item);
	case XFS_LI_DQUOT:
		return xlog_recover_dquot_pass2(log, buffer_list, item,
						trans->r_lsn);
	case XFS_LI_ICREATE:
		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
	case XFS_LI_QUOTAOFF:
		/* nothing to do in pass2 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EIO;
	}
}
STATIC int
xlog_recover_items_pass2(
	struct xlog                     *log,
	struct xlog_recover             *trans,
	struct list_head                *buffer_list,
	struct list_head                *item_list)
{
	struct xlog_recover_item	*item;
	int				error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		error = xlog_recover_commit_pass2(log, trans,
					  buffer_list, item);
		if (error)
			return error;
	}

	return error;
}
/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,
	struct list_head	*buffer_list)
{
	int				error = 0;
	int				items_queued = 0;
	struct xlog_recover_item	*item;
	struct xlog_recover_item	*next;
	LIST_HEAD			(ra_list);
	LIST_HEAD			(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			xlog_recover_ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}
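/*
 * Illustrative sketch (not part of the original code): the readahead batching
 * performed in xlog_recover_commit_trans() above. Readahead is issued as each
 * item is queued onto ra_list; once XLOG_RECOVER_COMMIT_QUEUE_MAX items are
 * queued the batch is recovered, so pass 2 works against buffers whose I/O is
 * already in flight.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	xlog_recover_ra_pass2(log, item);		/* start I/O early */
	list_move_tail(&item->ri_list, &ra_list);
	if (++items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
		error = xlog_recover_items_pass2(log, trans,
						 buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
		items_queued = 0;
	}
#endif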
STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xlog_recover_item_t	*item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EIO;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EIO;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EIO;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EIO;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
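/*
 * Illustrative sketch (not part of the original code): why peeking through
 * xfs_inode_log_format_t works for any item type, as the comment above
 * explains. Every log format structure begins with its type and size fields
 * in the first 32 bits, so the region count of a freshly started item can be
 * read through any format's size field. 'nregions' is a hypothetical local.
 */
#if 0	/* standalone sketch, never compiled into the kernel */
	in_f = (xfs_inode_log_format_t *)ptr;	/* any format will do */
	if (in_f->ilf_size == 0 ||
	    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
		/* corrupt region count, abort recovery */
	} else {
		nregions = in_f->ilf_size;	/* regions in this item */
	}
#endif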
/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	xlog_recover_item_t	*item, *n;
	int			i;

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}
/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass,
						  buffer_list);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EIO;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
/*
 * Lookup the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EIO;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EIO;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	/*
	 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required because:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
	 * - Log recovery skips items with a metadata LSN >= the current LSN of
	 *   the recovery item.
	 * - Separate recovery items against the same metadata buffer can share
	 *   a current LSN. I.e., consider that the LSN of a recovery item is
	 *   defined as the starting LSN of the first record in which its
	 *   transaction appears, that a record can hold multiple transactions,
	 *   and/or that a transaction can span multiple records.
	 *
	 * In other words, we are allowed to submit a buffer from log recovery
	 * once per current LSN. Otherwise, we may incorrectly skip recovery
	 * items and cause corruption.
	 *
	 * We don't know up front whether buffers are updated multiple times per
	 * LSN. Therefore, track the current LSN of each commit log record as it
	 * is processed and drain the queue when it changes. Use commit records
	 * because they are ordered correctly by the logging code.
	 */
	if (log->l_recovery_lsn != trans->r_lsn &&
	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass, buffer_list);
}
/*
 * There are two valid states of the r_state field.  0 indicates that the
 * transaction structure is in a normal state.  We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation.  If the last operation we added to the transaction was a
 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
4303 xlog_recover_process_data(
4305 struct hlist_head rhash
[],
4306 struct xlog_rec_header
*rhead
,
4309 struct list_head
*buffer_list
)
4311 struct xlog_op_header
*ohead
;
4316 end
= dp
+ be32_to_cpu(rhead
->h_len
);
4317 num_logops
= be32_to_cpu(rhead
->h_num_logops
);
4319 /* check the log format matches our own - else we can't recover */
4320 if (xlog_header_check_recover(log
->l_mp
, rhead
))
4323 trace_xfs_log_recover_record(log
, rhead
, pass
);
4324 while ((dp
< end
) && num_logops
) {
4326 ohead
= (struct xlog_op_header
*)dp
;
4327 dp
+= sizeof(*ohead
);
4330 /* errors will abort recovery */
4331 error
= xlog_recover_process_ophdr(log
, rhash
, rhead
, ohead
,
4332 dp
, end
, pass
, buffer_list
);
4336 dp
+= be32_to_cpu(ohead
->oh_len
);
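
/*
 * For reference (editor's sketch, not in the original source), the
 * record body walked above is a packed sequence of operation headers,
 * each followed by oh_len bytes of payload:
 *
 *	dp                                                  end
 *	|[xlog_op_header][payload][xlog_op_header][payload]...|
 *
 * Each iteration advances dp past the op header before handing the
 * payload to xlog_recover_process_ophdr(), then past oh_len bytes to
 * reach the next header; the loop stops at the record end or once
 * h_num_logops operations have been consumed.
 */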
/* Recover the EFI if necessary. */
STATIC int
xlog_recover_process_efi(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_efi_log_item		*efip;
	int				error;

	/*
	 * Skip EFIs that we've already processed.
	 */
	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
	if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
		return 0;

	spin_unlock(&ailp->xa_lock);
	error = xfs_efi_recover(mp, efip);
	spin_lock(&ailp->xa_lock);

	return error;
}
/* Release the EFI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_efi(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_efi_log_item		*efip;

	efip = container_of(lip, struct xfs_efi_log_item, efi_item);

	spin_unlock(&ailp->xa_lock);
	xfs_efi_release(efip);
	spin_lock(&ailp->xa_lock);
}
/* Recover the RUI if necessary. */
STATIC int
xlog_recover_process_rui(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_rui_log_item		*ruip;
	int				error;

	/*
	 * Skip RUIs that we've already processed.
	 */
	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
	if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
		return 0;

	spin_unlock(&ailp->xa_lock);
	error = xfs_rui_recover(mp, ruip);
	spin_lock(&ailp->xa_lock);

	return error;
}
/* Release the RUI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_rui(
	struct xfs_mount		*mp,
	struct xfs_ail			*ailp,
	struct xfs_log_item		*lip)
{
	struct xfs_rui_log_item		*ruip;

	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);

	spin_unlock(&ailp->xa_lock);
	xfs_rui_release(ruip);
	spin_lock(&ailp->xa_lock);
}
/* Is this log item a deferred action intent? */
static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
{
	switch (lip->li_type) {
	case XFS_LI_EFI:
	case XFS_LI_RUI:
		return true;
	default:
		return false;
	}
}
/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL.  What we do now
 * is update the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they
 * will be removed at some point after the commit.  This prevents us
 * from just walking down the list processing each one.  We'll use a
 * flag in the intent item to skip those that we've already processed
 * and use the AIL iteration mechanism's generation count to try to
 * speed this up at least a bit.
 *
 * When we start, we know that the intents are the only things in the
 * AIL.  As we process them, however, other items are added to the
 * AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;
	xfs_lsn_t		last_lsn;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		/*
		 * We should never see a redo item with a LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);

		switch (lip->li_type) {
		case XFS_LI_EFI:
			error = xlog_recover_process_efi(log->l_mp, ailp, lip);
			break;
		case XFS_LI_RUI:
			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
			break;
		}
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}
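
/*
 * Annotation (not in the original source): recovering an intent above
 * runs a normal transaction, so new log items can appear in the AIL
 * while the walk is in progress, as the comment before this function
 * notes.  The AIL cursor keeps the iteration stable across that churn,
 * and every original intent carries an LSN <= last_lsn, which is what
 * the ASSERT on li_lsn relies on.
 */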
/*
 * A cancel occurs when the mount has failed and we're bailing out.
 * Release all pending log intent items so they don't pin the AIL.
 */
STATIC int
xlog_recover_cancel_intents(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		switch (lip->li_type) {
		case XFS_LI_EFI:
			xlog_recover_cancel_efi(log->l_mp, ailp, lip);
			break;
		case XFS_LI_RUI:
			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
			break;
		}

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}
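
/*
 * Worked example of the logging range above (editor's illustration):
 * xfs_agino_t is a 32-bit value, so clearing bucket 3 logs the four
 * bytes at offsetof(xfs_agi_t, agi_unlinked) + 12 through + 15 of the
 * AGI buffer, exactly the one bucket slot just set to NULLAGINO.
 */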
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	IRELE(ip);
	return agino;

 fail_iput:
	IRELE(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
/*
 * xlog_iunlink_recover
 *
 * This is called during recovery to process any inodes which we
 * unlinked but did not free when the system crashed.  These
 * inodes will be on the lists in the AGI blocks.  What we do
 * here is scan all the AGIs and fully truncate and free any
 * inodes found on the lists.  Each inode is removed from the
 * lists when it has been fully truncated and is freed.  The
 * freeing of the inode and its removal from the list must be
 * atomic.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the ag's we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * a buffer reference, though, so that it stays pinned in
		 * memory while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}
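
/*
 * Illustrative bucket walk (not from the original source): if AGI
 * bucket 0 holds agino 12, inode 12's on-disk di_next_unlinked holds
 * 57, and inode 57's holds NULLAGINO, the inner loop above calls
 * xlog_recover_process_one_iunlink() for 12 and then 57 and stops when
 * NULLAGINO comes back.  An unreadable inode also ends the chain, since
 * the helper clears the bucket and returns NULLAGINO in that case.
 */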
STATIC int
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	struct xlog		*log)
{
	int			i, j, k;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	return 0;
}
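
/*
 * Worked example of the index math above (editor's illustration):
 * XLOG_HEADER_CYCLE_SIZE / BBSIZE is 32768 / 512 = 64 cycle words per
 * header.  For basic block i = 70 of a large record, j = 70 / 64 = 1
 * and k = 70 % 64 = 6, so the saved cycle value comes from slot 6 of
 * xhdr[1], the first extended header; blocks 64 and up are restored
 * from xh_cycle_data rather than from h_cycle_data in the primary
 * record header.
 */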
/*
 * CRC check, unpack and process a log record.
 */
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error;
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));

	/*
	 * Nothing else to do if this is a CRC verification pass. Just return
	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
	 * sets h_crc to 0 so we must consider this valid even on v5 supers.
	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
	 * know precisely what failed.
	 */
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (rhead->h_crc && crc != rhead->h_crc)
			return -EFSBADCRC;
		return 0;
	}

	/*
	 * We're in the normal recovery path. Issue a warning if and only if the
	 * CRC in the header is non-zero. This is an advisory warning and the
	 * zero CRC check prevents warnings from being emitted when upgrading
	 * the kernel from one that does not add CRCs by default.
	 */
	if (crc != rhead->h_crc) {
		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(rhead->h_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If the filesystem is CRC enabled, this mismatch becomes a
		 * fatal log corruption failure.
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
			return -EFSCORRUPTED;
	}

	error = xlog_unpack_data(rhead, dp, log);
	if (error)
		return error;

	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
					 buffer_list);
}
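
/*
 * Summary of the CRC policy above (annotation, not in the original
 * source): in XLOG_RECOVER_CRCPASS a mismatch yields -EFSBADCRC and a
 * zero h_crc (as written by old mkfs) is tolerated; in the real
 * recovery passes a mismatch is only advisory on non-CRC filesystems
 * but becomes fatal (-EFSCORRUPTED) once the superblock has CRCs
 * enabled.
 */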
STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EIO;
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			error2 = 0;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD		(buffer_list);

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;

		/*
		 * xfsprogs has a bug where record length is based on lsunit but
		 * h_size (iclog size) is hardcoded to 32k. Now that we
		 * unconditionally CRC verify the unmount record, this means the
		 * log buffer can be too small for the record and cause an
		 * overrun.
		 *
		 * Detect this condition here. Use lsunit for the buffer size as
		 * long as this looks like the mkfs case. Otherwise, return an
		 * error to avoid a buffer overrun.
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size) {
			if (h_len <= log->l_mp->m_logbsize &&
			    be32_to_cpu(rhead->h_num_logops) == 1) {
				xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
					 h_size, log->l_mp->m_logbsize);
				h_size = log->l_mp->m_logbsize;
			} else
				return -EFSCORRUPTED;
		}

		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return -ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	blk_no = rhead_blk = tail_blk;
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass, &buffer_list);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass,
					     &buffer_list);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);

	/*
	 * Submit buffers that have been added from the last record processed,
	 * regardless of error status.
	 */
	if (!list_empty(&buffer_list))
		error2 = xfs_buf_delwri_submit(&buffer_list);

	if (error && first_bad)
		*first_bad = rhead_blk;

	return error ? error : error2;
}
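
/*
 * Illustrative wrap-around case for the loop above (editor's sketch):
 * on a 1000-block log (l_logBBsize == 1000) with hblks == 8, a record
 * header starting at blk_no == 996 is read in two pieces: split_hblks
 * == 4 blocks from 996-999, then wrapped_hblks == 4 blocks from 0-3
 * placed at offset + BBTOB(4) in the same buffer; end of log first,
 * start of log second, relying on the extra sector xlog_get_bp()
 * allocates.  Once blk_no passes l_logBBsize it is reduced by the log
 * size and the sequential loop handles the remaining records up to
 * head_blk.
 */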
/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(struct list_head),
						 KM_SLEEP);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1, NULL);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2, NULL);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}
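
/*
 * Sketch of why two passes are needed (annotation, not in the original
 * source): if the log contains "write buffer X" at one LSN and a
 * cancel record for X at a later LSN, a single sequential replay would
 * rewrite stale contents of X before learning of the cancel.  Pass 1
 * only fills l_buf_cancel_table, hashed by disk block number; pass 2
 * consults that table and skips replay of cancelled buffers entirely.
 */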
/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	struct xfs_mount *mp = log->l_mp;
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock and reverify it.
	 */
	bp = xfs_getsb(mp, 0);
	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
	ASSERT(!(bp->b_flags & XBF_WRITE));
	bp->b_flags |= XBF_READ;
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit_wait(bp);
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __func__);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the LSN
	 * could not be verified. Check the superblock LSN against the current
	 * LSN now that it's known.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know the
		 * log is dirty so check if there are any unknown log features
		 * in what we need to recover. If there are unknown features
		 * (e.g. unsupported transactions), then simply reject the
		 * attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures with
		 * log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_intents(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover intents");
			return error;
		}

		/*
		 * Sync the log to get all the intents out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the intents out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}
int
xlog_recover_cancel(
	struct xlog	*log)
{
	int		error = 0;

	if (log->l_flags & XLOG_RECOVERY_NEEDED)
		error = xlog_recover_cancel_intents(log);

	return error;
}
#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif	/* DEBUG */