]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - fs/xfs/xfs_log_recover.c
xfs: refactor buffer logging into buffer dirtying helper
[mirror_ubuntu-artful-kernel.git] / fs / xfs / xfs_log_recover.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_inode.h"
30 #include "xfs_trans.h"
31 #include "xfs_log.h"
32 #include "xfs_log_priv.h"
33 #include "xfs_log_recover.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_trans_priv.h"
37 #include "xfs_alloc.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_quota.h"
40 #include "xfs_cksum.h"
41 #include "xfs_trace.h"
42 #include "xfs_icache.h"
43 #include "xfs_bmap_btree.h"
44 #include "xfs_error.h"
45 #include "xfs_dir2.h"
46 #include "xfs_rmap_item.h"
47 #include "xfs_buf_item.h"
48 #include "xfs_refcount_item.h"
49 #include "xfs_bmap_item.h"
50
51 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
52
53 STATIC int
54 xlog_find_zeroed(
55 struct xlog *,
56 xfs_daddr_t *);
57 STATIC int
58 xlog_clear_stale_blocks(
59 struct xlog *,
60 xfs_lsn_t);
61 #if defined(DEBUG)
62 STATIC void
63 xlog_recover_check_summary(
64 struct xlog *);
65 #else
66 #define xlog_recover_check_summary(log)
67 #endif
68 STATIC int
69 xlog_do_recovery_pass(
70 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
71
72 /*
73 * This structure is used during recovery to record the buf log items which
74 * have been canceled and should not be replayed.
75 */
76 struct xfs_buf_cancel {
77 xfs_daddr_t bc_blkno;
78 uint bc_len;
79 int bc_refcount;
80 struct list_head bc_list;
81 };
82
83 /*
84 * Sector aligned buffer routines for buffer create/read/write/access
85 */
86
87 /*
88 * Verify the given count of basic blocks is valid number of blocks
89 * to specify for an operation involving the given XFS log buffer.
90 * Returns nonzero if the count is valid, 0 otherwise.
91 */
92
93 static inline int
94 xlog_buf_bbcount_valid(
95 struct xlog *log,
96 int bbcount)
97 {
98 return bbcount > 0 && bbcount <= log->l_logBBsize;
99 }
100
101 /*
102 * Allocate a buffer to hold log data. The buffer needs to be able
103 * to map to a range of nbblks basic blocks at any valid (basic
104 * block) offset within the log.
105 */
106 STATIC xfs_buf_t *
107 xlog_get_bp(
108 struct xlog *log,
109 int nbblks)
110 {
111 struct xfs_buf *bp;
112
113 if (!xlog_buf_bbcount_valid(log, nbblks)) {
114 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
115 nbblks);
116 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
117 return NULL;
118 }
119
120 /*
121 * We do log I/O in units of log sectors (a power-of-2
122 * multiple of the basic block size), so we round up the
123 * requested size to accommodate the basic blocks required
124 * for complete log sectors.
125 *
126 * In addition, the buffer may be used for a non-sector-
127 * aligned block offset, in which case an I/O of the
128 * requested size could extend beyond the end of the
129 * buffer. If the requested size is only 1 basic block it
130 * will never straddle a sector boundary, so this won't be
131 * an issue. Nor will this be a problem if the log I/O is
132 * done in basic blocks (sector size 1). But otherwise we
133 * extend the buffer by one extra log sector to ensure
134 * there's space to accommodate this possibility.
135 */
136 if (nbblks > 1 && log->l_sectBBsize > 1)
137 nbblks += log->l_sectBBsize;
138 nbblks = round_up(nbblks, log->l_sectBBsize);
139
140 bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
141 if (bp)
142 xfs_buf_unlock(bp);
143 return bp;
144 }
145
146 STATIC void
147 xlog_put_bp(
148 xfs_buf_t *bp)
149 {
150 xfs_buf_free(bp);
151 }
152
153 /*
154 * Return the address of the start of the given block number's data
155 * in a log buffer. The buffer covers a log sector-aligned region.
156 */
157 STATIC char *
158 xlog_align(
159 struct xlog *log,
160 xfs_daddr_t blk_no,
161 int nbblks,
162 struct xfs_buf *bp)
163 {
164 xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
165
166 ASSERT(offset + nbblks <= bp->b_length);
167 return bp->b_addr + BBTOB(offset);
168 }
169
170
171 /*
172 * nbblks should be uint, but oh well. Just want to catch that 32-bit length.
173 */
174 STATIC int
175 xlog_bread_noalign(
176 struct xlog *log,
177 xfs_daddr_t blk_no,
178 int nbblks,
179 struct xfs_buf *bp)
180 {
181 int error;
182
183 if (!xlog_buf_bbcount_valid(log, nbblks)) {
184 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
185 nbblks);
186 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
187 return -EFSCORRUPTED;
188 }
189
190 blk_no = round_down(blk_no, log->l_sectBBsize);
191 nbblks = round_up(nbblks, log->l_sectBBsize);
192
193 ASSERT(nbblks > 0);
194 ASSERT(nbblks <= bp->b_length);
195
196 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
197 bp->b_flags |= XBF_READ;
198 bp->b_io_length = nbblks;
199 bp->b_error = 0;
200
201 error = xfs_buf_submit_wait(bp);
202 if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
203 xfs_buf_ioerror_alert(bp, __func__);
204 return error;
205 }
206
207 STATIC int
208 xlog_bread(
209 struct xlog *log,
210 xfs_daddr_t blk_no,
211 int nbblks,
212 struct xfs_buf *bp,
213 char **offset)
214 {
215 int error;
216
217 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
218 if (error)
219 return error;
220
221 *offset = xlog_align(log, blk_no, nbblks, bp);
222 return 0;
223 }
224
225 /*
226 * Read at an offset into the buffer. Returns with the buffer in it's original
227 * state regardless of the result of the read.
228 */
229 STATIC int
230 xlog_bread_offset(
231 struct xlog *log,
232 xfs_daddr_t blk_no, /* block to read from */
233 int nbblks, /* blocks to read */
234 struct xfs_buf *bp,
235 char *offset)
236 {
237 char *orig_offset = bp->b_addr;
238 int orig_len = BBTOB(bp->b_length);
239 int error, error2;
240
241 error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
242 if (error)
243 return error;
244
245 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
246
247 /* must reset buffer pointer even on error */
248 error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
249 if (error)
250 return error;
251 return error2;
252 }
253
254 /*
255 * Write out the buffer at the given block for the given number of blocks.
256 * The buffer is kept locked across the write and is returned locked.
257 * This can only be used for synchronous log writes.
258 */
259 STATIC int
260 xlog_bwrite(
261 struct xlog *log,
262 xfs_daddr_t blk_no,
263 int nbblks,
264 struct xfs_buf *bp)
265 {
266 int error;
267
268 if (!xlog_buf_bbcount_valid(log, nbblks)) {
269 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
270 nbblks);
271 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
272 return -EFSCORRUPTED;
273 }
274
275 blk_no = round_down(blk_no, log->l_sectBBsize);
276 nbblks = round_up(nbblks, log->l_sectBBsize);
277
278 ASSERT(nbblks > 0);
279 ASSERT(nbblks <= bp->b_length);
280
281 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
282 xfs_buf_hold(bp);
283 xfs_buf_lock(bp);
284 bp->b_io_length = nbblks;
285 bp->b_error = 0;
286
287 error = xfs_bwrite(bp);
288 if (error)
289 xfs_buf_ioerror_alert(bp, __func__);
290 xfs_buf_relse(bp);
291 return error;
292 }
293
294 #ifdef DEBUG
295 /*
296 * dump debug superblock and log record information
297 */
298 STATIC void
299 xlog_header_check_dump(
300 xfs_mount_t *mp,
301 xlog_rec_header_t *head)
302 {
303 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
304 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
305 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
306 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
307 }
308 #else
309 #define xlog_header_check_dump(mp, head)
310 #endif
311
312 /*
313 * check log record header for recovery
314 */
315 STATIC int
316 xlog_header_check_recover(
317 xfs_mount_t *mp,
318 xlog_rec_header_t *head)
319 {
320 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
321
322 /*
323 * IRIX doesn't write the h_fmt field and leaves it zeroed
324 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
325 * a dirty log created in IRIX.
326 */
327 if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
328 xfs_warn(mp,
329 "dirty log written in incompatible format - can't recover");
330 xlog_header_check_dump(mp, head);
331 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
332 XFS_ERRLEVEL_HIGH, mp);
333 return -EFSCORRUPTED;
334 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
335 xfs_warn(mp,
336 "dirty log entry has mismatched uuid - can't recover");
337 xlog_header_check_dump(mp, head);
338 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
339 XFS_ERRLEVEL_HIGH, mp);
340 return -EFSCORRUPTED;
341 }
342 return 0;
343 }
344
345 /*
346 * read the head block of the log and check the header
347 */
348 STATIC int
349 xlog_header_check_mount(
350 xfs_mount_t *mp,
351 xlog_rec_header_t *head)
352 {
353 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
354
355 if (uuid_is_null(&head->h_fs_uuid)) {
356 /*
357 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
358 * h_fs_uuid is null, we assume this log was last mounted
359 * by IRIX and continue.
360 */
361 xfs_warn(mp, "null uuid in log - IRIX style log");
362 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
363 xfs_warn(mp, "log has mismatched uuid - can't recover");
364 xlog_header_check_dump(mp, head);
365 XFS_ERROR_REPORT("xlog_header_check_mount",
366 XFS_ERRLEVEL_HIGH, mp);
367 return -EFSCORRUPTED;
368 }
369 return 0;
370 }
371
372 STATIC void
373 xlog_recover_iodone(
374 struct xfs_buf *bp)
375 {
376 if (bp->b_error) {
377 /*
378 * We're not going to bother about retrying
379 * this during recovery. One strike!
380 */
381 if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
382 xfs_buf_ioerror_alert(bp, __func__);
383 xfs_force_shutdown(bp->b_target->bt_mount,
384 SHUTDOWN_META_IO_ERROR);
385 }
386 }
387
388 /*
389 * On v5 supers, a bli could be attached to update the metadata LSN.
390 * Clean it up.
391 */
392 if (bp->b_fspriv)
393 xfs_buf_item_relse(bp);
394 ASSERT(bp->b_fspriv == NULL);
395
396 bp->b_iodone = NULL;
397 xfs_buf_ioend(bp);
398 }
399
400 /*
401 * This routine finds (to an approximation) the first block in the physical
402 * log which contains the given cycle. It uses a binary search algorithm.
403 * Note that the algorithm can not be perfect because the disk will not
404 * necessarily be perfect.
405 */
406 STATIC int
407 xlog_find_cycle_start(
408 struct xlog *log,
409 struct xfs_buf *bp,
410 xfs_daddr_t first_blk,
411 xfs_daddr_t *last_blk,
412 uint cycle)
413 {
414 char *offset;
415 xfs_daddr_t mid_blk;
416 xfs_daddr_t end_blk;
417 uint mid_cycle;
418 int error;
419
420 end_blk = *last_blk;
421 mid_blk = BLK_AVG(first_blk, end_blk);
422 while (mid_blk != first_blk && mid_blk != end_blk) {
423 error = xlog_bread(log, mid_blk, 1, bp, &offset);
424 if (error)
425 return error;
426 mid_cycle = xlog_get_cycle(offset);
427 if (mid_cycle == cycle)
428 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
429 else
430 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
431 mid_blk = BLK_AVG(first_blk, end_blk);
432 }
433 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
434 (mid_blk == end_blk && mid_blk-1 == first_blk));
435
436 *last_blk = end_blk;
437
438 return 0;
439 }
440
441 /*
442 * Check that a range of blocks does not contain stop_on_cycle_no.
443 * Fill in *new_blk with the block offset where such a block is
444 * found, or with -1 (an invalid block number) if there is no such
445 * block in the range. The scan needs to occur from front to back
446 * and the pointer into the region must be updated since a later
447 * routine will need to perform another test.
448 */
449 STATIC int
450 xlog_find_verify_cycle(
451 struct xlog *log,
452 xfs_daddr_t start_blk,
453 int nbblks,
454 uint stop_on_cycle_no,
455 xfs_daddr_t *new_blk)
456 {
457 xfs_daddr_t i, j;
458 uint cycle;
459 xfs_buf_t *bp;
460 xfs_daddr_t bufblks;
461 char *buf = NULL;
462 int error = 0;
463
464 /*
465 * Greedily allocate a buffer big enough to handle the full
466 * range of basic blocks we'll be examining. If that fails,
467 * try a smaller size. We need to be able to read at least
468 * a log sector, or we're out of luck.
469 */
470 bufblks = 1 << ffs(nbblks);
471 while (bufblks > log->l_logBBsize)
472 bufblks >>= 1;
473 while (!(bp = xlog_get_bp(log, bufblks))) {
474 bufblks >>= 1;
475 if (bufblks < log->l_sectBBsize)
476 return -ENOMEM;
477 }
478
479 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
480 int bcount;
481
482 bcount = min(bufblks, (start_blk + nbblks - i));
483
484 error = xlog_bread(log, i, bcount, bp, &buf);
485 if (error)
486 goto out;
487
488 for (j = 0; j < bcount; j++) {
489 cycle = xlog_get_cycle(buf);
490 if (cycle == stop_on_cycle_no) {
491 *new_blk = i+j;
492 goto out;
493 }
494
495 buf += BBSIZE;
496 }
497 }
498
499 *new_blk = -1;
500
501 out:
502 xlog_put_bp(bp);
503 return error;
504 }
505
506 /*
507 * Potentially backup over partial log record write.
508 *
509 * In the typical case, last_blk is the number of the block directly after
510 * a good log record. Therefore, we subtract one to get the block number
511 * of the last block in the given buffer. extra_bblks contains the number
512 * of blocks we would have read on a previous read. This happens when the
513 * last log record is split over the end of the physical log.
514 *
515 * extra_bblks is the number of blocks potentially verified on a previous
516 * call to this routine.
517 */
518 STATIC int
519 xlog_find_verify_log_record(
520 struct xlog *log,
521 xfs_daddr_t start_blk,
522 xfs_daddr_t *last_blk,
523 int extra_bblks)
524 {
525 xfs_daddr_t i;
526 xfs_buf_t *bp;
527 char *offset = NULL;
528 xlog_rec_header_t *head = NULL;
529 int error = 0;
530 int smallmem = 0;
531 int num_blks = *last_blk - start_blk;
532 int xhdrs;
533
534 ASSERT(start_blk != 0 || *last_blk != start_blk);
535
536 if (!(bp = xlog_get_bp(log, num_blks))) {
537 if (!(bp = xlog_get_bp(log, 1)))
538 return -ENOMEM;
539 smallmem = 1;
540 } else {
541 error = xlog_bread(log, start_blk, num_blks, bp, &offset);
542 if (error)
543 goto out;
544 offset += ((num_blks - 1) << BBSHIFT);
545 }
546
547 for (i = (*last_blk) - 1; i >= 0; i--) {
548 if (i < start_blk) {
549 /* valid log record not found */
550 xfs_warn(log->l_mp,
551 "Log inconsistent (didn't find previous header)");
552 ASSERT(0);
553 error = -EIO;
554 goto out;
555 }
556
557 if (smallmem) {
558 error = xlog_bread(log, i, 1, bp, &offset);
559 if (error)
560 goto out;
561 }
562
563 head = (xlog_rec_header_t *)offset;
564
565 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
566 break;
567
568 if (!smallmem)
569 offset -= BBSIZE;
570 }
571
572 /*
573 * We hit the beginning of the physical log & still no header. Return
574 * to caller. If caller can handle a return of -1, then this routine
575 * will be called again for the end of the physical log.
576 */
577 if (i == -1) {
578 error = 1;
579 goto out;
580 }
581
582 /*
583 * We have the final block of the good log (the first block
584 * of the log record _before_ the head. So we check the uuid.
585 */
586 if ((error = xlog_header_check_mount(log->l_mp, head)))
587 goto out;
588
589 /*
590 * We may have found a log record header before we expected one.
591 * last_blk will be the 1st block # with a given cycle #. We may end
592 * up reading an entire log record. In this case, we don't want to
593 * reset last_blk. Only when last_blk points in the middle of a log
594 * record do we update last_blk.
595 */
596 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
597 uint h_size = be32_to_cpu(head->h_size);
598
599 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
600 if (h_size % XLOG_HEADER_CYCLE_SIZE)
601 xhdrs++;
602 } else {
603 xhdrs = 1;
604 }
605
606 if (*last_blk - i + extra_bblks !=
607 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
608 *last_blk = i;
609
610 out:
611 xlog_put_bp(bp);
612 return error;
613 }
614
615 /*
616 * Head is defined to be the point of the log where the next log write
617 * could go. This means that incomplete LR writes at the end are
618 * eliminated when calculating the head. We aren't guaranteed that previous
619 * LR have complete transactions. We only know that a cycle number of
620 * current cycle number -1 won't be present in the log if we start writing
621 * from our current block number.
622 *
623 * last_blk contains the block number of the first block with a given
624 * cycle number.
625 *
626 * Return: zero if normal, non-zero if error.
627 */
628 STATIC int
629 xlog_find_head(
630 struct xlog *log,
631 xfs_daddr_t *return_head_blk)
632 {
633 xfs_buf_t *bp;
634 char *offset;
635 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
636 int num_scan_bblks;
637 uint first_half_cycle, last_half_cycle;
638 uint stop_on_cycle;
639 int error, log_bbnum = log->l_logBBsize;
640
641 /* Is the end of the log device zeroed? */
642 error = xlog_find_zeroed(log, &first_blk);
643 if (error < 0) {
644 xfs_warn(log->l_mp, "empty log check failed");
645 return error;
646 }
647 if (error == 1) {
648 *return_head_blk = first_blk;
649
650 /* Is the whole lot zeroed? */
651 if (!first_blk) {
652 /* Linux XFS shouldn't generate totally zeroed logs -
653 * mkfs etc write a dummy unmount record to a fresh
654 * log so we can store the uuid in there
655 */
656 xfs_warn(log->l_mp, "totally zeroed log");
657 }
658
659 return 0;
660 }
661
662 first_blk = 0; /* get cycle # of 1st block */
663 bp = xlog_get_bp(log, 1);
664 if (!bp)
665 return -ENOMEM;
666
667 error = xlog_bread(log, 0, 1, bp, &offset);
668 if (error)
669 goto bp_err;
670
671 first_half_cycle = xlog_get_cycle(offset);
672
673 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
674 error = xlog_bread(log, last_blk, 1, bp, &offset);
675 if (error)
676 goto bp_err;
677
678 last_half_cycle = xlog_get_cycle(offset);
679 ASSERT(last_half_cycle != 0);
680
681 /*
682 * If the 1st half cycle number is equal to the last half cycle number,
683 * then the entire log is stamped with the same cycle number. In this
684 * case, head_blk can't be set to zero (which makes sense). The below
685 * math doesn't work out properly with head_blk equal to zero. Instead,
686 * we set it to log_bbnum which is an invalid block number, but this
687 * value makes the math correct. If head_blk doesn't changed through
688 * all the tests below, *head_blk is set to zero at the very end rather
689 * than log_bbnum. In a sense, log_bbnum and zero are the same block
690 * in a circular file.
691 */
692 if (first_half_cycle == last_half_cycle) {
693 /*
694 * In this case we believe that the entire log should have
695 * cycle number last_half_cycle. We need to scan backwards
696 * from the end verifying that there are no holes still
697 * containing last_half_cycle - 1. If we find such a hole,
698 * then the start of that hole will be the new head. The
699 * simple case looks like
700 * x | x ... | x - 1 | x
701 * Another case that fits this picture would be
702 * x | x + 1 | x ... | x
703 * In this case the head really is somewhere at the end of the
704 * log, as one of the latest writes at the beginning was
705 * incomplete.
706 * One more case is
707 * x | x + 1 | x ... | x - 1 | x
708 * This is really the combination of the above two cases, and
709 * the head has to end up at the start of the x-1 hole at the
710 * end of the log.
711 *
712 * In the 256k log case, we will read from the beginning to the
713 * end of the log and search for cycle numbers equal to x-1.
714 * We don't worry about the x+1 blocks that we encounter,
715 * because we know that they cannot be the head since the log
716 * started with x.
717 */
718 head_blk = log_bbnum;
719 stop_on_cycle = last_half_cycle - 1;
720 } else {
721 /*
722 * In this case we want to find the first block with cycle
723 * number matching last_half_cycle. We expect the log to be
724 * some variation on
725 * x + 1 ... | x ... | x
726 * The first block with cycle number x (last_half_cycle) will
727 * be where the new head belongs. First we do a binary search
728 * for the first occurrence of last_half_cycle. The binary
729 * search may not be totally accurate, so then we scan back
730 * from there looking for occurrences of last_half_cycle before
731 * us. If that backwards scan wraps around the beginning of
732 * the log, then we look for occurrences of last_half_cycle - 1
733 * at the end of the log. The cases we're looking for look
734 * like
735 * v binary search stopped here
736 * x + 1 ... | x | x + 1 | x ... | x
737 * ^ but we want to locate this spot
738 * or
739 * <---------> less than scan distance
740 * x + 1 ... | x ... | x - 1 | x
741 * ^ we want to locate this spot
742 */
743 stop_on_cycle = last_half_cycle;
744 if ((error = xlog_find_cycle_start(log, bp, first_blk,
745 &head_blk, last_half_cycle)))
746 goto bp_err;
747 }
748
749 /*
750 * Now validate the answer. Scan back some number of maximum possible
751 * blocks and make sure each one has the expected cycle number. The
752 * maximum is determined by the total possible amount of buffering
753 * in the in-core log. The following number can be made tighter if
754 * we actually look at the block size of the filesystem.
755 */
756 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
757 if (head_blk >= num_scan_bblks) {
758 /*
759 * We are guaranteed that the entire check can be performed
760 * in one buffer.
761 */
762 start_blk = head_blk - num_scan_bblks;
763 if ((error = xlog_find_verify_cycle(log,
764 start_blk, num_scan_bblks,
765 stop_on_cycle, &new_blk)))
766 goto bp_err;
767 if (new_blk != -1)
768 head_blk = new_blk;
769 } else { /* need to read 2 parts of log */
770 /*
771 * We are going to scan backwards in the log in two parts.
772 * First we scan the physical end of the log. In this part
773 * of the log, we are looking for blocks with cycle number
774 * last_half_cycle - 1.
775 * If we find one, then we know that the log starts there, as
776 * we've found a hole that didn't get written in going around
777 * the end of the physical log. The simple case for this is
778 * x + 1 ... | x ... | x - 1 | x
779 * <---------> less than scan distance
780 * If all of the blocks at the end of the log have cycle number
781 * last_half_cycle, then we check the blocks at the start of
782 * the log looking for occurrences of last_half_cycle. If we
783 * find one, then our current estimate for the location of the
784 * first occurrence of last_half_cycle is wrong and we move
785 * back to the hole we've found. This case looks like
786 * x + 1 ... | x | x + 1 | x ...
787 * ^ binary search stopped here
788 * Another case we need to handle that only occurs in 256k
789 * logs is
790 * x + 1 ... | x ... | x+1 | x ...
791 * ^ binary search stops here
792 * In a 256k log, the scan at the end of the log will see the
793 * x + 1 blocks. We need to skip past those since that is
794 * certainly not the head of the log. By searching for
795 * last_half_cycle-1 we accomplish that.
796 */
797 ASSERT(head_blk <= INT_MAX &&
798 (xfs_daddr_t) num_scan_bblks >= head_blk);
799 start_blk = log_bbnum - (num_scan_bblks - head_blk);
800 if ((error = xlog_find_verify_cycle(log, start_blk,
801 num_scan_bblks - (int)head_blk,
802 (stop_on_cycle - 1), &new_blk)))
803 goto bp_err;
804 if (new_blk != -1) {
805 head_blk = new_blk;
806 goto validate_head;
807 }
808
809 /*
810 * Scan beginning of log now. The last part of the physical
811 * log is good. This scan needs to verify that it doesn't find
812 * the last_half_cycle.
813 */
814 start_blk = 0;
815 ASSERT(head_blk <= INT_MAX);
816 if ((error = xlog_find_verify_cycle(log,
817 start_blk, (int)head_blk,
818 stop_on_cycle, &new_blk)))
819 goto bp_err;
820 if (new_blk != -1)
821 head_blk = new_blk;
822 }
823
824 validate_head:
825 /*
826 * Now we need to make sure head_blk is not pointing to a block in
827 * the middle of a log record.
828 */
829 num_scan_bblks = XLOG_REC_SHIFT(log);
830 if (head_blk >= num_scan_bblks) {
831 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
832
833 /* start ptr at last block ptr before head_blk */
834 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
835 if (error == 1)
836 error = -EIO;
837 if (error)
838 goto bp_err;
839 } else {
840 start_blk = 0;
841 ASSERT(head_blk <= INT_MAX);
842 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
843 if (error < 0)
844 goto bp_err;
845 if (error == 1) {
846 /* We hit the beginning of the log during our search */
847 start_blk = log_bbnum - (num_scan_bblks - head_blk);
848 new_blk = log_bbnum;
849 ASSERT(start_blk <= INT_MAX &&
850 (xfs_daddr_t) log_bbnum-start_blk >= 0);
851 ASSERT(head_blk <= INT_MAX);
852 error = xlog_find_verify_log_record(log, start_blk,
853 &new_blk, (int)head_blk);
854 if (error == 1)
855 error = -EIO;
856 if (error)
857 goto bp_err;
858 if (new_blk != log_bbnum)
859 head_blk = new_blk;
860 } else if (error)
861 goto bp_err;
862 }
863
864 xlog_put_bp(bp);
865 if (head_blk == log_bbnum)
866 *return_head_blk = 0;
867 else
868 *return_head_blk = head_blk;
869 /*
870 * When returning here, we have a good block number. Bad block
871 * means that during a previous crash, we didn't have a clean break
872 * from cycle number N to cycle number N-1. In this case, we need
873 * to find the first block with cycle number N-1.
874 */
875 return 0;
876
877 bp_err:
878 xlog_put_bp(bp);
879
880 if (error)
881 xfs_warn(log->l_mp, "failed to find log head");
882 return error;
883 }
884
885 /*
886 * Seek backwards in the log for log record headers.
887 *
888 * Given a starting log block, walk backwards until we find the provided number
889 * of records or hit the provided tail block. The return value is the number of
890 * records encountered or a negative error code. The log block and buffer
891 * pointer of the last record seen are returned in rblk and rhead respectively.
892 */
893 STATIC int
894 xlog_rseek_logrec_hdr(
895 struct xlog *log,
896 xfs_daddr_t head_blk,
897 xfs_daddr_t tail_blk,
898 int count,
899 struct xfs_buf *bp,
900 xfs_daddr_t *rblk,
901 struct xlog_rec_header **rhead,
902 bool *wrapped)
903 {
904 int i;
905 int error;
906 int found = 0;
907 char *offset = NULL;
908 xfs_daddr_t end_blk;
909
910 *wrapped = false;
911
912 /*
913 * Walk backwards from the head block until we hit the tail or the first
914 * block in the log.
915 */
916 end_blk = head_blk > tail_blk ? tail_blk : 0;
917 for (i = (int) head_blk - 1; i >= end_blk; i--) {
918 error = xlog_bread(log, i, 1, bp, &offset);
919 if (error)
920 goto out_error;
921
922 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
923 *rblk = i;
924 *rhead = (struct xlog_rec_header *) offset;
925 if (++found == count)
926 break;
927 }
928 }
929
930 /*
931 * If we haven't hit the tail block or the log record header count,
932 * start looking again from the end of the physical log. Note that
933 * callers can pass head == tail if the tail is not yet known.
934 */
935 if (tail_blk >= head_blk && found != count) {
936 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
937 error = xlog_bread(log, i, 1, bp, &offset);
938 if (error)
939 goto out_error;
940
941 if (*(__be32 *)offset ==
942 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
943 *wrapped = true;
944 *rblk = i;
945 *rhead = (struct xlog_rec_header *) offset;
946 if (++found == count)
947 break;
948 }
949 }
950 }
951
952 return found;
953
954 out_error:
955 return error;
956 }
957
958 /*
959 * Seek forward in the log for log record headers.
960 *
961 * Given head and tail blocks, walk forward from the tail block until we find
962 * the provided number of records or hit the head block. The return value is the
963 * number of records encountered or a negative error code. The log block and
964 * buffer pointer of the last record seen are returned in rblk and rhead
965 * respectively.
966 */
967 STATIC int
968 xlog_seek_logrec_hdr(
969 struct xlog *log,
970 xfs_daddr_t head_blk,
971 xfs_daddr_t tail_blk,
972 int count,
973 struct xfs_buf *bp,
974 xfs_daddr_t *rblk,
975 struct xlog_rec_header **rhead,
976 bool *wrapped)
977 {
978 int i;
979 int error;
980 int found = 0;
981 char *offset = NULL;
982 xfs_daddr_t end_blk;
983
984 *wrapped = false;
985
986 /*
987 * Walk forward from the tail block until we hit the head or the last
988 * block in the log.
989 */
990 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
991 for (i = (int) tail_blk; i <= end_blk; i++) {
992 error = xlog_bread(log, i, 1, bp, &offset);
993 if (error)
994 goto out_error;
995
996 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
997 *rblk = i;
998 *rhead = (struct xlog_rec_header *) offset;
999 if (++found == count)
1000 break;
1001 }
1002 }
1003
1004 /*
1005 * If we haven't hit the head block or the log record header count,
1006 * start looking again from the start of the physical log.
1007 */
1008 if (tail_blk > head_blk && found != count) {
1009 for (i = 0; i < (int) head_blk; i++) {
1010 error = xlog_bread(log, i, 1, bp, &offset);
1011 if (error)
1012 goto out_error;
1013
1014 if (*(__be32 *)offset ==
1015 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
1016 *wrapped = true;
1017 *rblk = i;
1018 *rhead = (struct xlog_rec_header *) offset;
1019 if (++found == count)
1020 break;
1021 }
1022 }
1023 }
1024
1025 return found;
1026
1027 out_error:
1028 return error;
1029 }
1030
1031 /*
1032 * Calculate distance from head to tail (i.e., unused space in the log).
1033 */
1034 static inline int
1035 xlog_tail_distance(
1036 struct xlog *log,
1037 xfs_daddr_t head_blk,
1038 xfs_daddr_t tail_blk)
1039 {
1040 if (head_blk < tail_blk)
1041 return tail_blk - head_blk;
1042
1043 return tail_blk + (log->l_logBBsize - head_blk);
1044 }
1045
1046 /*
1047 * Verify the log tail. This is particularly important when torn or incomplete
1048 * writes have been detected near the front of the log and the head has been
1049 * walked back accordingly.
1050 *
1051 * We also have to handle the case where the tail was pinned and the head
1052 * blocked behind the tail right before a crash. If the tail had been pushed
1053 * immediately prior to the crash and the subsequent checkpoint was only
1054 * partially written, it's possible it overwrote the last referenced tail in the
1055 * log with garbage. This is not a coherency problem because the tail must have
1056 * been pushed before it can be overwritten, but appears as log corruption to
1057 * recovery because we have no way to know the tail was updated if the
1058 * subsequent checkpoint didn't write successfully.
1059 *
1060 * Therefore, CRC check the log from tail to head. If a failure occurs and the
1061 * offending record is within max iclog bufs from the head, walk the tail
1062 * forward and retry until a valid tail is found or corruption is detected out
1063 * of the range of a possible overwrite.
1064 */
1065 STATIC int
1066 xlog_verify_tail(
1067 struct xlog *log,
1068 xfs_daddr_t head_blk,
1069 xfs_daddr_t *tail_blk,
1070 int hsize)
1071 {
1072 struct xlog_rec_header *thead;
1073 struct xfs_buf *bp;
1074 xfs_daddr_t first_bad;
1075 int error = 0;
1076 bool wrapped;
1077 xfs_daddr_t tmp_tail;
1078 xfs_daddr_t orig_tail = *tail_blk;
1079
1080 bp = xlog_get_bp(log, 1);
1081 if (!bp)
1082 return -ENOMEM;
1083
1084 /*
1085 * Make sure the tail points to a record (returns positive count on
1086 * success).
1087 */
1088 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
1089 &tmp_tail, &thead, &wrapped);
1090 if (error < 0)
1091 goto out;
1092 if (*tail_blk != tmp_tail)
1093 *tail_blk = tmp_tail;
1094
1095 /*
1096 * Run a CRC check from the tail to the head. We can't just check
1097 * MAX_ICLOGS records past the tail because the tail may point to stale
1098 * blocks cleared during the search for the head/tail. These blocks are
1099 * overwritten with zero-length records and thus record count is not a
1100 * reliable indicator of the iclog state before a crash.
1101 */
1102 first_bad = 0;
1103 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1104 XLOG_RECOVER_CRCPASS, &first_bad);
1105 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1106 int tail_distance;
1107
1108 /*
1109 * Is corruption within range of the head? If so, retry from
1110 * the next record. Otherwise return an error.
1111 */
1112 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1113 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1114 break;
1115
1116 /* skip to the next record; returns positive count on success */
1117 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
1118 &tmp_tail, &thead, &wrapped);
1119 if (error < 0)
1120 goto out;
1121
1122 *tail_blk = tmp_tail;
1123 first_bad = 0;
1124 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1125 XLOG_RECOVER_CRCPASS, &first_bad);
1126 }
1127
1128 if (!error && *tail_blk != orig_tail)
1129 xfs_warn(log->l_mp,
1130 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1131 orig_tail, *tail_blk);
1132 out:
1133 xlog_put_bp(bp);
1134 return error;
1135 }
1136
1137 /*
1138 * Detect and trim torn writes from the head of the log.
1139 *
1140 * Storage without sector atomicity guarantees can result in torn writes in the
1141 * log in the event of a crash. Our only means to detect this scenario is via
1142 * CRC verification. While we can't always be certain that CRC verification
1143 * failure is due to a torn write vs. an unrelated corruption, we do know that
1144 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1145 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1146 * the log and treat failures in this range as torn writes as a matter of
1147 * policy. In the event of CRC failure, the head is walked back to the last good
1148 * record in the log and the tail is updated from that record and verified.
1149 */
1150 STATIC int
1151 xlog_verify_head(
1152 struct xlog *log,
1153 xfs_daddr_t *head_blk, /* in/out: unverified head */
1154 xfs_daddr_t *tail_blk, /* out: tail block */
1155 struct xfs_buf *bp,
1156 xfs_daddr_t *rhead_blk, /* start blk of last record */
1157 struct xlog_rec_header **rhead, /* ptr to last record */
1158 bool *wrapped) /* last rec. wraps phys. log */
1159 {
1160 struct xlog_rec_header *tmp_rhead;
1161 struct xfs_buf *tmp_bp;
1162 xfs_daddr_t first_bad;
1163 xfs_daddr_t tmp_rhead_blk;
1164 int found;
1165 int error;
1166 bool tmp_wrapped;
1167
1168 /*
1169 * Check the head of the log for torn writes. Search backwards from the
1170 * head until we hit the tail or the maximum number of log record I/Os
1171 * that could have been in flight at one time. Use a temporary buffer so
1172 * we don't trash the rhead/bp pointers from the caller.
1173 */
1174 tmp_bp = xlog_get_bp(log, 1);
1175 if (!tmp_bp)
1176 return -ENOMEM;
1177 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1178 XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
1179 &tmp_rhead, &tmp_wrapped);
1180 xlog_put_bp(tmp_bp);
1181 if (error < 0)
1182 return error;
1183
1184 /*
1185 * Now run a CRC verification pass over the records starting at the
1186 * block found above to the current head. If a CRC failure occurs, the
1187 * log block of the first bad record is saved in first_bad.
1188 */
1189 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1190 XLOG_RECOVER_CRCPASS, &first_bad);
1191 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1192 /*
1193 * We've hit a potential torn write. Reset the error and warn
1194 * about it.
1195 */
1196 error = 0;
1197 xfs_warn(log->l_mp,
1198 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1199 first_bad, *head_blk);
1200
1201 /*
1202 * Get the header block and buffer pointer for the last good
1203 * record before the bad record.
1204 *
1205 * Note that xlog_find_tail() clears the blocks at the new head
1206 * (i.e., the records with invalid CRC) if the cycle number
1207 * matches the the current cycle.
1208 */
1209 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
1210 rhead_blk, rhead, wrapped);
1211 if (found < 0)
1212 return found;
1213 if (found == 0) /* XXX: right thing to do here? */
1214 return -EIO;
1215
1216 /*
1217 * Reset the head block to the starting block of the first bad
1218 * log record and set the tail block based on the last good
1219 * record.
1220 *
1221 * Bail out if the updated head/tail match as this indicates
1222 * possible corruption outside of the acceptable
1223 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1224 */
1225 *head_blk = first_bad;
1226 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1227 if (*head_blk == *tail_blk) {
1228 ASSERT(0);
1229 return 0;
1230 }
1231 }
1232 if (error)
1233 return error;
1234
1235 return xlog_verify_tail(log, *head_blk, tail_blk,
1236 be32_to_cpu((*rhead)->h_size));
1237 }
1238
1239 /*
1240 * Check whether the head of the log points to an unmount record. In other
1241 * words, determine whether the log is clean. If so, update the in-core state
1242 * appropriately.
1243 */
1244 static int
1245 xlog_check_unmount_rec(
1246 struct xlog *log,
1247 xfs_daddr_t *head_blk,
1248 xfs_daddr_t *tail_blk,
1249 struct xlog_rec_header *rhead,
1250 xfs_daddr_t rhead_blk,
1251 struct xfs_buf *bp,
1252 bool *clean)
1253 {
1254 struct xlog_op_header *op_head;
1255 xfs_daddr_t umount_data_blk;
1256 xfs_daddr_t after_umount_blk;
1257 int hblks;
1258 int error;
1259 char *offset;
1260
1261 *clean = false;
1262
1263 /*
1264 * Look for unmount record. If we find it, then we know there was a
1265 * clean unmount. Since 'i' could be the last block in the physical
1266 * log, we convert to a log block before comparing to the head_blk.
1267 *
1268 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1269 * below. We won't want to clear the unmount record if there is one, so
1270 * we pass the lsn of the unmount record rather than the block after it.
1271 */
1272 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1273 int h_size = be32_to_cpu(rhead->h_size);
1274 int h_version = be32_to_cpu(rhead->h_version);
1275
1276 if ((h_version & XLOG_VERSION_2) &&
1277 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1278 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1279 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1280 hblks++;
1281 } else {
1282 hblks = 1;
1283 }
1284 } else {
1285 hblks = 1;
1286 }
1287 after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
1288 after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
1289 if (*head_blk == after_umount_blk &&
1290 be32_to_cpu(rhead->h_num_logops) == 1) {
1291 umount_data_blk = rhead_blk + hblks;
1292 umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
1293 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1294 if (error)
1295 return error;
1296
1297 op_head = (struct xlog_op_header *)offset;
1298 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1299 /*
1300 * Set tail and last sync so that newly written log
1301 * records will point recovery to after the current
1302 * unmount record.
1303 */
1304 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1305 log->l_curr_cycle, after_umount_blk);
1306 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1307 log->l_curr_cycle, after_umount_blk);
1308 *tail_blk = after_umount_blk;
1309
1310 *clean = true;
1311 }
1312 }
1313
1314 return 0;
1315 }
1316
1317 static void
1318 xlog_set_state(
1319 struct xlog *log,
1320 xfs_daddr_t head_blk,
1321 struct xlog_rec_header *rhead,
1322 xfs_daddr_t rhead_blk,
1323 bool bump_cycle)
1324 {
1325 /*
1326 * Reset log values according to the state of the log when we
1327 * crashed. In the case where head_blk == 0, we bump curr_cycle
1328 * one because the next write starts a new cycle rather than
1329 * continuing the cycle of the last good log record. At this
1330 * point we have guaranteed that all partial log records have been
1331 * accounted for. Therefore, we know that the last good log record
1332 * written was complete and ended exactly on the end boundary
1333 * of the physical log.
1334 */
1335 log->l_prev_block = rhead_blk;
1336 log->l_curr_block = (int)head_blk;
1337 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1338 if (bump_cycle)
1339 log->l_curr_cycle++;
1340 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1341 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1342 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1343 BBTOB(log->l_curr_block));
1344 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1345 BBTOB(log->l_curr_block));
1346 }
1347
1348 /*
1349 * Find the sync block number or the tail of the log.
1350 *
1351 * This will be the block number of the last record to have its
1352 * associated buffers synced to disk. Every log record header has
1353 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
1354 * to get a sync block number. The only concern is to figure out which
1355 * log record header to believe.
1356 *
1357 * The following algorithm uses the log record header with the largest
1358 * lsn. The entire log record does not need to be valid. We only care
1359 * that the header is valid.
1360 *
1361 * We could speed up search by using current head_blk buffer, but it is not
1362 * available.
1363 */
1364 STATIC int
1365 xlog_find_tail(
1366 struct xlog *log,
1367 xfs_daddr_t *head_blk,
1368 xfs_daddr_t *tail_blk)
1369 {
1370 xlog_rec_header_t *rhead;
1371 char *offset = NULL;
1372 xfs_buf_t *bp;
1373 int error;
1374 xfs_daddr_t rhead_blk;
1375 xfs_lsn_t tail_lsn;
1376 bool wrapped = false;
1377 bool clean = false;
1378
1379 /*
1380 * Find previous log record
1381 */
1382 if ((error = xlog_find_head(log, head_blk)))
1383 return error;
1384 ASSERT(*head_blk < INT_MAX);
1385
1386 bp = xlog_get_bp(log, 1);
1387 if (!bp)
1388 return -ENOMEM;
1389 if (*head_blk == 0) { /* special case */
1390 error = xlog_bread(log, 0, 1, bp, &offset);
1391 if (error)
1392 goto done;
1393
1394 if (xlog_get_cycle(offset) == 0) {
1395 *tail_blk = 0;
1396 /* leave all other log inited values alone */
1397 goto done;
1398 }
1399 }
1400
1401 /*
1402 * Search backwards through the log looking for the log record header
1403 * block. This wraps all the way back around to the head so something is
1404 * seriously wrong if we can't find it.
1405 */
1406 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
1407 &rhead_blk, &rhead, &wrapped);
1408 if (error < 0)
1409 return error;
1410 if (!error) {
1411 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1412 return -EIO;
1413 }
1414 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1415
1416 /*
1417 * Set the log state based on the current head record.
1418 */
1419 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1420 tail_lsn = atomic64_read(&log->l_tail_lsn);
1421
1422 /*
1423 * Look for an unmount record at the head of the log. This sets the log
1424 * state to determine whether recovery is necessary.
1425 */
1426 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1427 rhead_blk, bp, &clean);
1428 if (error)
1429 goto done;
1430
1431 /*
1432 * Verify the log head if the log is not clean (e.g., we have anything
1433 * but an unmount record at the head). This uses CRC verification to
1434 * detect and trim torn writes. If discovered, CRC failures are
1435 * considered torn writes and the log head is trimmed accordingly.
1436 *
1437 * Note that we can only run CRC verification when the log is dirty
1438 * because there's no guarantee that the log data behind an unmount
1439 * record is compatible with the current architecture.
1440 */
1441 if (!clean) {
1442 xfs_daddr_t orig_head = *head_blk;
1443
1444 error = xlog_verify_head(log, head_blk, tail_blk, bp,
1445 &rhead_blk, &rhead, &wrapped);
1446 if (error)
1447 goto done;
1448
1449 /* update in-core state again if the head changed */
1450 if (*head_blk != orig_head) {
1451 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1452 wrapped);
1453 tail_lsn = atomic64_read(&log->l_tail_lsn);
1454 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1455 rhead, rhead_blk, bp,
1456 &clean);
1457 if (error)
1458 goto done;
1459 }
1460 }
1461
1462 /*
1463 * Note that the unmount was clean. If the unmount was not clean, we
1464 * need to know this to rebuild the superblock counters from the perag
1465 * headers if we have a filesystem using non-persistent counters.
1466 */
1467 if (clean)
1468 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1469
1470 /*
1471 * Make sure that there are no blocks in front of the head
1472 * with the same cycle number as the head. This can happen
1473 * because we allow multiple outstanding log writes concurrently,
1474 * and the later writes might make it out before earlier ones.
1475 *
1476 * We use the lsn from before modifying it so that we'll never
1477 * overwrite the unmount record after a clean unmount.
1478 *
1479 * Do this only if we are going to recover the filesystem
1480 *
1481 * NOTE: This used to say "if (!readonly)"
1482 * However on Linux, we can & do recover a read-only filesystem.
1483 * We only skip recovery if NORECOVERY is specified on mount,
1484 * in which case we would not be here.
1485 *
1486 * But... if the -device- itself is readonly, just skip this.
1487 * We can't recover this device anyway, so it won't matter.
1488 */
1489 if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1490 error = xlog_clear_stale_blocks(log, tail_lsn);
1491
1492 done:
1493 xlog_put_bp(bp);
1494
1495 if (error)
1496 xfs_warn(log->l_mp, "failed to locate log tail");
1497 return error;
1498 }
1499
1500 /*
1501 * Is the log zeroed at all?
1502 *
1503 * The last binary search should be changed to perform an X block read
1504 * once X becomes small enough. You can then search linearly through
1505 * the X blocks. This will cut down on the number of reads we need to do.
1506 *
1507 * If the log is partially zeroed, this routine will pass back the blkno
1508 * of the first block with cycle number 0. It won't have a complete LR
1509 * preceding it.
1510 *
1511 * Return:
1512 * 0 => the log is completely written to
1513 * 1 => use *blk_no as the first block of the log
1514 * <0 => error has occurred
1515 */
1516 STATIC int
1517 xlog_find_zeroed(
1518 struct xlog *log,
1519 xfs_daddr_t *blk_no)
1520 {
1521 xfs_buf_t *bp;
1522 char *offset;
1523 uint first_cycle, last_cycle;
1524 xfs_daddr_t new_blk, last_blk, start_blk;
1525 xfs_daddr_t num_scan_bblks;
1526 int error, log_bbnum = log->l_logBBsize;
1527
1528 *blk_no = 0;
1529
1530 /* check totally zeroed log */
1531 bp = xlog_get_bp(log, 1);
1532 if (!bp)
1533 return -ENOMEM;
1534 error = xlog_bread(log, 0, 1, bp, &offset);
1535 if (error)
1536 goto bp_err;
1537
1538 first_cycle = xlog_get_cycle(offset);
1539 if (first_cycle == 0) { /* completely zeroed log */
1540 *blk_no = 0;
1541 xlog_put_bp(bp);
1542 return 1;
1543 }
1544
1545 /* check partially zeroed log */
1546 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1547 if (error)
1548 goto bp_err;
1549
1550 last_cycle = xlog_get_cycle(offset);
1551 if (last_cycle != 0) { /* log completely written to */
1552 xlog_put_bp(bp);
1553 return 0;
1554 } else if (first_cycle != 1) {
1555 /*
1556 * If the cycle of the last block is zero, the cycle of
1557 * the first block must be 1. If it's not, maybe we're
1558 * not looking at a log... Bail out.
1559 */
1560 xfs_warn(log->l_mp,
1561 "Log inconsistent or not a log (last==0, first!=1)");
1562 error = -EINVAL;
1563 goto bp_err;
1564 }
1565
1566 /* we have a partially zeroed log */
1567 last_blk = log_bbnum-1;
1568 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1569 goto bp_err;
1570
1571 /*
1572 * Validate the answer. Because there is no way to guarantee that
1573 * the entire log is made up of log records which are the same size,
1574 * we scan over the defined maximum blocks. At this point, the maximum
1575 * is not chosen to mean anything special. XXXmiken
1576 */
1577 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1578 ASSERT(num_scan_bblks <= INT_MAX);
1579
1580 if (last_blk < num_scan_bblks)
1581 num_scan_bblks = last_blk;
1582 start_blk = last_blk - num_scan_bblks;
1583
1584 /*
1585 * We search for any instances of cycle number 0 that occur before
1586 * our current estimate of the head. What we're trying to detect is
1587 * 1 ... | 0 | 1 | 0...
1588 * ^ binary search ends here
1589 */
1590 if ((error = xlog_find_verify_cycle(log, start_blk,
1591 (int)num_scan_bblks, 0, &new_blk)))
1592 goto bp_err;
1593 if (new_blk != -1)
1594 last_blk = new_blk;
1595
1596 /*
1597 * Potentially backup over partial log record write. We don't need
1598 * to search the end of the log because we know it is zero.
1599 */
1600 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1601 if (error == 1)
1602 error = -EIO;
1603 if (error)
1604 goto bp_err;
1605
1606 *blk_no = last_blk;
1607 bp_err:
1608 xlog_put_bp(bp);
1609 if (error)
1610 return error;
1611 return 1;
1612 }
1613
1614 /*
1615 * These are simple subroutines used by xlog_clear_stale_blocks() below
1616 * to initialize a buffer full of empty log record headers and write
1617 * them into the log.
1618 */
1619 STATIC void
1620 xlog_add_record(
1621 struct xlog *log,
1622 char *buf,
1623 int cycle,
1624 int block,
1625 int tail_cycle,
1626 int tail_block)
1627 {
1628 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1629
1630 memset(buf, 0, BBSIZE);
1631 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1632 recp->h_cycle = cpu_to_be32(cycle);
1633 recp->h_version = cpu_to_be32(
1634 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1635 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1636 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1637 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1638 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1639 }
1640
1641 STATIC int
1642 xlog_write_log_records(
1643 struct xlog *log,
1644 int cycle,
1645 int start_block,
1646 int blocks,
1647 int tail_cycle,
1648 int tail_block)
1649 {
1650 char *offset;
1651 xfs_buf_t *bp;
1652 int balign, ealign;
1653 int sectbb = log->l_sectBBsize;
1654 int end_block = start_block + blocks;
1655 int bufblks;
1656 int error = 0;
1657 int i, j = 0;
1658
1659 /*
1660 * Greedily allocate a buffer big enough to handle the full
1661 * range of basic blocks to be written. If that fails, try
1662 * a smaller size. We need to be able to write at least a
1663 * log sector, or we're out of luck.
1664 */
1665 bufblks = 1 << ffs(blocks);
1666 while (bufblks > log->l_logBBsize)
1667 bufblks >>= 1;
1668 while (!(bp = xlog_get_bp(log, bufblks))) {
1669 bufblks >>= 1;
1670 if (bufblks < sectbb)
1671 return -ENOMEM;
1672 }
1673
1674 /* We may need to do a read at the start to fill in part of
1675 * the buffer in the starting sector not covered by the first
1676 * write below.
1677 */
1678 balign = round_down(start_block, sectbb);
1679 if (balign != start_block) {
1680 error = xlog_bread_noalign(log, start_block, 1, bp);
1681 if (error)
1682 goto out_put_bp;
1683
1684 j = start_block - balign;
1685 }
1686
1687 for (i = start_block; i < end_block; i += bufblks) {
1688 int bcount, endcount;
1689
1690 bcount = min(bufblks, end_block - start_block);
1691 endcount = bcount - j;
1692
1693 /* We may need to do a read at the end to fill in part of
1694 * the buffer in the final sector not covered by the write.
1695 * If this is the same sector as the above read, skip it.
1696 */
1697 ealign = round_down(end_block, sectbb);
1698 if (j == 0 && (start_block + endcount > ealign)) {
1699 offset = bp->b_addr + BBTOB(ealign - start_block);
1700 error = xlog_bread_offset(log, ealign, sectbb,
1701 bp, offset);
1702 if (error)
1703 break;
1704
1705 }
1706
1707 offset = xlog_align(log, start_block, endcount, bp);
1708 for (; j < endcount; j++) {
1709 xlog_add_record(log, offset, cycle, i+j,
1710 tail_cycle, tail_block);
1711 offset += BBSIZE;
1712 }
1713 error = xlog_bwrite(log, start_block, endcount, bp);
1714 if (error)
1715 break;
1716 start_block += endcount;
1717 j = 0;
1718 }
1719
1720 out_put_bp:
1721 xlog_put_bp(bp);
1722 return error;
1723 }
1724
1725 /*
1726 * This routine is called to blow away any incomplete log writes out
1727 * in front of the log head. We do this so that we won't become confused
1728 * if we come up, write only a little bit more, and then crash again.
1729 * If we leave the partial log records out there, this situation could
1730 * cause us to think those partial writes are valid blocks since they
1731 * have the current cycle number. We get rid of them by overwriting them
1732 * with empty log records with the old cycle number rather than the
1733 * current one.
1734 *
1735 * The tail lsn is passed in rather than taken from
1736 * the log so that we will not write over the unmount record after a
1737 * clean unmount in a 512 block log. Doing so would leave the log without
1738 * any valid log records in it until a new one was written. If we crashed
1739 * during that time we would not be able to recover.
1740 */
1741 STATIC int
1742 xlog_clear_stale_blocks(
1743 struct xlog *log,
1744 xfs_lsn_t tail_lsn)
1745 {
1746 int tail_cycle, head_cycle;
1747 int tail_block, head_block;
1748 int tail_distance, max_distance;
1749 int distance;
1750 int error;
1751
1752 tail_cycle = CYCLE_LSN(tail_lsn);
1753 tail_block = BLOCK_LSN(tail_lsn);
1754 head_cycle = log->l_curr_cycle;
1755 head_block = log->l_curr_block;
1756
1757 /*
1758 * Figure out the distance between the new head of the log
1759 * and the tail. We want to write over any blocks beyond the
1760 * head that we may have written just before the crash, but
1761 * we don't want to overwrite the tail of the log.
1762 */
1763 if (head_cycle == tail_cycle) {
1764 /*
1765 * The tail is behind the head in the physical log,
1766 * so the distance from the head to the tail is the
1767 * distance from the head to the end of the log plus
1768 * the distance from the beginning of the log to the
1769 * tail.
1770 */
1771 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1772 XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1773 XFS_ERRLEVEL_LOW, log->l_mp);
1774 return -EFSCORRUPTED;
1775 }
1776 tail_distance = tail_block + (log->l_logBBsize - head_block);
1777 } else {
1778 /*
1779 * The head is behind the tail in the physical log,
1780 * so the distance from the head to the tail is just
1781 * the tail block minus the head block.
1782 */
1783 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1784 XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1785 XFS_ERRLEVEL_LOW, log->l_mp);
1786 return -EFSCORRUPTED;
1787 }
1788 tail_distance = tail_block - head_block;
1789 }
1790
1791 /*
1792 * If the head is right up against the tail, we can't clear
1793 * anything.
1794 */
1795 if (tail_distance <= 0) {
1796 ASSERT(tail_distance == 0);
1797 return 0;
1798 }
1799
1800 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1801 /*
1802 * Take the smaller of the maximum amount of outstanding I/O
1803 * we could have and the distance to the tail to clear out.
1804 * We take the smaller so that we don't overwrite the tail and
1805 * we don't waste all day writing from the head to the tail
1806 * for no reason.
1807 */
1808 max_distance = MIN(max_distance, tail_distance);
1809
1810 if ((head_block + max_distance) <= log->l_logBBsize) {
1811 /*
1812 * We can stomp all the blocks we need to without
1813 * wrapping around the end of the log. Just do it
1814 * in a single write. Use the cycle number of the
1815 * current cycle minus one so that the log will look like:
1816 * n ... | n - 1 ...
1817 */
1818 error = xlog_write_log_records(log, (head_cycle - 1),
1819 head_block, max_distance, tail_cycle,
1820 tail_block);
1821 if (error)
1822 return error;
1823 } else {
1824 /*
1825 * We need to wrap around the end of the physical log in
1826 * order to clear all the blocks. Do it in two separate
1827 * I/Os. The first write should be from the head to the
1828 * end of the physical log, and it should use the current
1829 * cycle number minus one just like above.
1830 */
1831 distance = log->l_logBBsize - head_block;
1832 error = xlog_write_log_records(log, (head_cycle - 1),
1833 head_block, distance, tail_cycle,
1834 tail_block);
1835
1836 if (error)
1837 return error;
1838
1839 /*
1840 * Now write the blocks at the start of the physical log.
1841 * This writes the remainder of the blocks we want to clear.
1842 * It uses the current cycle number since we're now on the
1843 * same cycle as the head so that we get:
1844 * n ... n ... | n - 1 ...
1845 * ^^^^^ blocks we're writing
1846 */
1847 distance = max_distance - (log->l_logBBsize - head_block);
1848 error = xlog_write_log_records(log, head_cycle, 0, distance,
1849 tail_cycle, tail_block);
1850 if (error)
1851 return error;
1852 }
1853
1854 return 0;
1855 }
1856
1857 /******************************************************************************
1858 *
1859 * Log recover routines
1860 *
1861 ******************************************************************************
1862 */
1863
1864 /*
1865 * Sort the log items in the transaction.
1866 *
1867 * The ordering constraints are defined by the inode allocation and unlink
1868 * behaviour. The rules are:
1869 *
1870 * 1. Every item is only logged once in a given transaction. Hence it
1871 * represents the last logged state of the item. Hence ordering is
1872 * dependent on the order in which operations need to be performed so
1873 * required initial conditions are always met.
1874 *
1875 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1876 * there's nothing to replay from them so we can simply cull them
1877 * from the transaction. However, we can't do that until after we've
1878 * replayed all the other items because they may be dependent on the
1879 * cancelled buffer and replaying the cancelled buffer can remove it
1880 * form the cancelled buffer table. Hence they have tobe done last.
1881 *
1882 * 3. Inode allocation buffers must be replayed before inode items that
1883 * read the buffer and replay changes into it. For filesystems using the
1884 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1885 * treated the same as inode allocation buffers as they create and
1886 * initialise the buffers directly.
1887 *
1888 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1889 * This ensures that inodes are completely flushed to the inode buffer
1890 * in a "free" state before we remove the unlinked inode list pointer.
1891 *
1892 * Hence the ordering needs to be inode allocation buffers first, inode items
1893 * second, inode unlink buffers third and cancelled buffers last.
1894 *
1895 * But there's a problem with that - we can't tell an inode allocation buffer
1896 * apart from a regular buffer, so we can't separate them. We can, however,
1897 * tell an inode unlink buffer from the others, and so we can separate them out
1898 * from all the other buffers and move them to last.
1899 *
1900 * Hence, 4 lists, in order from head to tail:
1901 * - buffer_list for all buffers except cancelled/inode unlink buffers
1902 * - item_list for all non-buffer items
1903 * - inode_buffer_list for inode unlink buffers
1904 * - cancel_list for the cancelled buffers
1905 *
1906 * Note that we add objects to the tail of the lists so that first-to-last
1907 * ordering is preserved within the lists. Adding objects to the head of the
1908 * list means when we traverse from the head we walk them in last-to-first
1909 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1910 * but for all other items there may be specific ordering that we need to
1911 * preserve.
1912 */
1913 STATIC int
1914 xlog_recover_reorder_trans(
1915 struct xlog *log,
1916 struct xlog_recover *trans,
1917 int pass)
1918 {
1919 xlog_recover_item_t *item, *n;
1920 int error = 0;
1921 LIST_HEAD(sort_list);
1922 LIST_HEAD(cancel_list);
1923 LIST_HEAD(buffer_list);
1924 LIST_HEAD(inode_buffer_list);
1925 LIST_HEAD(inode_list);
1926
1927 list_splice_init(&trans->r_itemq, &sort_list);
1928 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1929 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1930
1931 switch (ITEM_TYPE(item)) {
1932 case XFS_LI_ICREATE:
1933 list_move_tail(&item->ri_list, &buffer_list);
1934 break;
1935 case XFS_LI_BUF:
1936 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1937 trace_xfs_log_recover_item_reorder_head(log,
1938 trans, item, pass);
1939 list_move(&item->ri_list, &cancel_list);
1940 break;
1941 }
1942 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1943 list_move(&item->ri_list, &inode_buffer_list);
1944 break;
1945 }
1946 list_move_tail(&item->ri_list, &buffer_list);
1947 break;
1948 case XFS_LI_INODE:
1949 case XFS_LI_DQUOT:
1950 case XFS_LI_QUOTAOFF:
1951 case XFS_LI_EFD:
1952 case XFS_LI_EFI:
1953 case XFS_LI_RUI:
1954 case XFS_LI_RUD:
1955 case XFS_LI_CUI:
1956 case XFS_LI_CUD:
1957 case XFS_LI_BUI:
1958 case XFS_LI_BUD:
1959 trace_xfs_log_recover_item_reorder_tail(log,
1960 trans, item, pass);
1961 list_move_tail(&item->ri_list, &inode_list);
1962 break;
1963 default:
1964 xfs_warn(log->l_mp,
1965 "%s: unrecognized type of log operation",
1966 __func__);
1967 ASSERT(0);
1968 /*
1969 * return the remaining items back to the transaction
1970 * item list so they can be freed in caller.
1971 */
1972 if (!list_empty(&sort_list))
1973 list_splice_init(&sort_list, &trans->r_itemq);
1974 error = -EIO;
1975 goto out;
1976 }
1977 }
1978 out:
1979 ASSERT(list_empty(&sort_list));
1980 if (!list_empty(&buffer_list))
1981 list_splice(&buffer_list, &trans->r_itemq);
1982 if (!list_empty(&inode_list))
1983 list_splice_tail(&inode_list, &trans->r_itemq);
1984 if (!list_empty(&inode_buffer_list))
1985 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1986 if (!list_empty(&cancel_list))
1987 list_splice_tail(&cancel_list, &trans->r_itemq);
1988 return error;
1989 }
1990
1991 /*
1992 * Build up the table of buf cancel records so that we don't replay
1993 * cancelled data in the second pass. For buffer records that are
1994 * not cancel records, there is nothing to do here so we just return.
1995 *
1996 * If we get a cancel record which is already in the table, this indicates
1997 * that the buffer was cancelled multiple times. In order to ensure
1998 * that during pass 2 we keep the record in the table until we reach its
1999 * last occurrence in the log, we keep a reference count in the cancel
2000 * record in the table to tell us how many times we expect to see this
2001 * record during the second pass.
2002 */
2003 STATIC int
2004 xlog_recover_buffer_pass1(
2005 struct xlog *log,
2006 struct xlog_recover_item *item)
2007 {
2008 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2009 struct list_head *bucket;
2010 struct xfs_buf_cancel *bcp;
2011
2012 /*
2013 * If this isn't a cancel buffer item, then just return.
2014 */
2015 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
2016 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
2017 return 0;
2018 }
2019
2020 /*
2021 * Insert an xfs_buf_cancel record into the hash table of them.
2022 * If there is already an identical record, bump its reference count.
2023 */
2024 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
2025 list_for_each_entry(bcp, bucket, bc_list) {
2026 if (bcp->bc_blkno == buf_f->blf_blkno &&
2027 bcp->bc_len == buf_f->blf_len) {
2028 bcp->bc_refcount++;
2029 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
2030 return 0;
2031 }
2032 }
2033
2034 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
2035 bcp->bc_blkno = buf_f->blf_blkno;
2036 bcp->bc_len = buf_f->blf_len;
2037 bcp->bc_refcount = 1;
2038 list_add_tail(&bcp->bc_list, bucket);
2039
2040 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
2041 return 0;
2042 }
2043
2044 /*
2045 * Check to see whether the buffer being recovered has a corresponding
2046 * entry in the buffer cancel record table. If it is, return the cancel
2047 * buffer structure to the caller.
2048 */
2049 STATIC struct xfs_buf_cancel *
2050 xlog_peek_buffer_cancelled(
2051 struct xlog *log,
2052 xfs_daddr_t blkno,
2053 uint len,
2054 unsigned short flags)
2055 {
2056 struct list_head *bucket;
2057 struct xfs_buf_cancel *bcp;
2058
2059 if (!log->l_buf_cancel_table) {
2060 /* empty table means no cancelled buffers in the log */
2061 ASSERT(!(flags & XFS_BLF_CANCEL));
2062 return NULL;
2063 }
2064
2065 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2066 list_for_each_entry(bcp, bucket, bc_list) {
2067 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2068 return bcp;
2069 }
2070
2071 /*
2072 * We didn't find a corresponding entry in the table, so return 0 so
2073 * that the buffer is NOT cancelled.
2074 */
2075 ASSERT(!(flags & XFS_BLF_CANCEL));
2076 return NULL;
2077 }
2078
2079 /*
2080 * If the buffer is being cancelled then return 1 so that it will be cancelled,
2081 * otherwise return 0. If the buffer is actually a buffer cancel item
2082 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2083 * table and remove it from the table if this is the last reference.
2084 *
2085 * We remove the cancel record from the table when we encounter its last
2086 * occurrence in the log so that if the same buffer is re-used again after its
2087 * last cancellation we actually replay the changes made at that point.
2088 */
2089 STATIC int
2090 xlog_check_buffer_cancelled(
2091 struct xlog *log,
2092 xfs_daddr_t blkno,
2093 uint len,
2094 unsigned short flags)
2095 {
2096 struct xfs_buf_cancel *bcp;
2097
2098 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2099 if (!bcp)
2100 return 0;
2101
2102 /*
2103 * We've go a match, so return 1 so that the recovery of this buffer
2104 * is cancelled. If this buffer is actually a buffer cancel log
2105 * item, then decrement the refcount on the one in the table and
2106 * remove it if this is the last reference.
2107 */
2108 if (flags & XFS_BLF_CANCEL) {
2109 if (--bcp->bc_refcount == 0) {
2110 list_del(&bcp->bc_list);
2111 kmem_free(bcp);
2112 }
2113 }
2114 return 1;
2115 }
2116
2117 /*
2118 * Perform recovery for a buffer full of inodes. In these buffers, the only
2119 * data which should be recovered is that which corresponds to the
2120 * di_next_unlinked pointers in the on disk inode structures. The rest of the
2121 * data for the inodes is always logged through the inodes themselves rather
2122 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2123 *
2124 * The only time when buffers full of inodes are fully recovered is when the
2125 * buffer is full of newly allocated inodes. In this case the buffer will
2126 * not be marked as an inode buffer and so will be sent to
2127 * xlog_recover_do_reg_buffer() below during recovery.
2128 */
2129 STATIC int
2130 xlog_recover_do_inode_buffer(
2131 struct xfs_mount *mp,
2132 xlog_recover_item_t *item,
2133 struct xfs_buf *bp,
2134 xfs_buf_log_format_t *buf_f)
2135 {
2136 int i;
2137 int item_index = 0;
2138 int bit = 0;
2139 int nbits = 0;
2140 int reg_buf_offset = 0;
2141 int reg_buf_bytes = 0;
2142 int next_unlinked_offset;
2143 int inodes_per_buf;
2144 xfs_agino_t *logged_nextp;
2145 xfs_agino_t *buffer_nextp;
2146
2147 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2148
2149 /*
2150 * Post recovery validation only works properly on CRC enabled
2151 * filesystems.
2152 */
2153 if (xfs_sb_version_hascrc(&mp->m_sb))
2154 bp->b_ops = &xfs_inode_buf_ops;
2155
2156 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2157 for (i = 0; i < inodes_per_buf; i++) {
2158 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2159 offsetof(xfs_dinode_t, di_next_unlinked);
2160
2161 while (next_unlinked_offset >=
2162 (reg_buf_offset + reg_buf_bytes)) {
2163 /*
2164 * The next di_next_unlinked field is beyond
2165 * the current logged region. Find the next
2166 * logged region that contains or is beyond
2167 * the current di_next_unlinked field.
2168 */
2169 bit += nbits;
2170 bit = xfs_next_bit(buf_f->blf_data_map,
2171 buf_f->blf_map_size, bit);
2172
2173 /*
2174 * If there are no more logged regions in the
2175 * buffer, then we're done.
2176 */
2177 if (bit == -1)
2178 return 0;
2179
2180 nbits = xfs_contig_bits(buf_f->blf_data_map,
2181 buf_f->blf_map_size, bit);
2182 ASSERT(nbits > 0);
2183 reg_buf_offset = bit << XFS_BLF_SHIFT;
2184 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2185 item_index++;
2186 }
2187
2188 /*
2189 * If the current logged region starts after the current
2190 * di_next_unlinked field, then move on to the next
2191 * di_next_unlinked field.
2192 */
2193 if (next_unlinked_offset < reg_buf_offset)
2194 continue;
2195
2196 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2197 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2198 ASSERT((reg_buf_offset + reg_buf_bytes) <=
2199 BBTOB(bp->b_io_length));
2200
2201 /*
2202 * The current logged region contains a copy of the
2203 * current di_next_unlinked field. Extract its value
2204 * and copy it to the buffer copy.
2205 */
2206 logged_nextp = item->ri_buf[item_index].i_addr +
2207 next_unlinked_offset - reg_buf_offset;
2208 if (unlikely(*logged_nextp == 0)) {
2209 xfs_alert(mp,
2210 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
2211 "Trying to replay bad (0) inode di_next_unlinked field.",
2212 item, bp);
2213 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2214 XFS_ERRLEVEL_LOW, mp);
2215 return -EFSCORRUPTED;
2216 }
2217
2218 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2219 *buffer_nextp = *logged_nextp;
2220
2221 /*
2222 * If necessary, recalculate the CRC in the on-disk inode. We
2223 * have to leave the inode in a consistent state for whoever
2224 * reads it next....
2225 */
2226 xfs_dinode_calc_crc(mp,
2227 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2228
2229 }
2230
2231 return 0;
2232 }
2233
2234 /*
2235 * V5 filesystems know the age of the buffer on disk being recovered. We can
2236 * have newer objects on disk than we are replaying, and so for these cases we
2237 * don't want to replay the current change as that will make the buffer contents
2238 * temporarily invalid on disk.
2239 *
2240 * The magic number might not match the buffer type we are going to recover
2241 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
2242 * extract the LSN of the existing object in the buffer based on it's current
2243 * magic number. If we don't recognise the magic number in the buffer, then
2244 * return a LSN of -1 so that the caller knows it was an unrecognised block and
2245 * so can recover the buffer.
2246 *
2247 * Note: we cannot rely solely on magic number matches to determine that the
2248 * buffer has a valid LSN - we also need to verify that it belongs to this
2249 * filesystem, so we need to extract the object's LSN and compare it to that
2250 * which we read from the superblock. If the UUIDs don't match, then we've got a
2251 * stale metadata block from an old filesystem instance that we need to recover
2252 * over the top of.
2253 */
2254 static xfs_lsn_t
2255 xlog_recover_get_buf_lsn(
2256 struct xfs_mount *mp,
2257 struct xfs_buf *bp)
2258 {
2259 uint32_t magic32;
2260 uint16_t magic16;
2261 uint16_t magicda;
2262 void *blk = bp->b_addr;
2263 uuid_t *uuid;
2264 xfs_lsn_t lsn = -1;
2265
2266 /* v4 filesystems always recover immediately */
2267 if (!xfs_sb_version_hascrc(&mp->m_sb))
2268 goto recover_immediately;
2269
2270 magic32 = be32_to_cpu(*(__be32 *)blk);
2271 switch (magic32) {
2272 case XFS_ABTB_CRC_MAGIC:
2273 case XFS_ABTC_CRC_MAGIC:
2274 case XFS_ABTB_MAGIC:
2275 case XFS_ABTC_MAGIC:
2276 case XFS_RMAP_CRC_MAGIC:
2277 case XFS_REFC_CRC_MAGIC:
2278 case XFS_IBT_CRC_MAGIC:
2279 case XFS_IBT_MAGIC: {
2280 struct xfs_btree_block *btb = blk;
2281
2282 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2283 uuid = &btb->bb_u.s.bb_uuid;
2284 break;
2285 }
2286 case XFS_BMAP_CRC_MAGIC:
2287 case XFS_BMAP_MAGIC: {
2288 struct xfs_btree_block *btb = blk;
2289
2290 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2291 uuid = &btb->bb_u.l.bb_uuid;
2292 break;
2293 }
2294 case XFS_AGF_MAGIC:
2295 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2296 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2297 break;
2298 case XFS_AGFL_MAGIC:
2299 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2300 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2301 break;
2302 case XFS_AGI_MAGIC:
2303 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2304 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2305 break;
2306 case XFS_SYMLINK_MAGIC:
2307 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2308 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2309 break;
2310 case XFS_DIR3_BLOCK_MAGIC:
2311 case XFS_DIR3_DATA_MAGIC:
2312 case XFS_DIR3_FREE_MAGIC:
2313 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2314 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2315 break;
2316 case XFS_ATTR3_RMT_MAGIC:
2317 /*
2318 * Remote attr blocks are written synchronously, rather than
2319 * being logged. That means they do not contain a valid LSN
2320 * (i.e. transactionally ordered) in them, and hence any time we
2321 * see a buffer to replay over the top of a remote attribute
2322 * block we should simply do so.
2323 */
2324 goto recover_immediately;
2325 case XFS_SB_MAGIC:
2326 /*
2327 * superblock uuids are magic. We may or may not have a
2328 * sb_meta_uuid on disk, but it will be set in the in-core
2329 * superblock. We set the uuid pointer for verification
2330 * according to the superblock feature mask to ensure we check
2331 * the relevant UUID in the superblock.
2332 */
2333 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2334 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2335 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2336 else
2337 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2338 break;
2339 default:
2340 break;
2341 }
2342
2343 if (lsn != (xfs_lsn_t)-1) {
2344 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2345 goto recover_immediately;
2346 return lsn;
2347 }
2348
2349 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2350 switch (magicda) {
2351 case XFS_DIR3_LEAF1_MAGIC:
2352 case XFS_DIR3_LEAFN_MAGIC:
2353 case XFS_DA3_NODE_MAGIC:
2354 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2355 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2356 break;
2357 default:
2358 break;
2359 }
2360
2361 if (lsn != (xfs_lsn_t)-1) {
2362 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2363 goto recover_immediately;
2364 return lsn;
2365 }
2366
2367 /*
2368 * We do individual object checks on dquot and inode buffers as they
2369 * have their own individual LSN records. Also, we could have a stale
2370 * buffer here, so we have to at least recognise these buffer types.
2371 *
2372 * A notd complexity here is inode unlinked list processing - it logs
2373 * the inode directly in the buffer, but we don't know which inodes have
2374 * been modified, and there is no global buffer LSN. Hence we need to
2375 * recover all inode buffer types immediately. This problem will be
2376 * fixed by logical logging of the unlinked list modifications.
2377 */
2378 magic16 = be16_to_cpu(*(__be16 *)blk);
2379 switch (magic16) {
2380 case XFS_DQUOT_MAGIC:
2381 case XFS_DINODE_MAGIC:
2382 goto recover_immediately;
2383 default:
2384 break;
2385 }
2386
2387 /* unknown buffer contents, recover immediately */
2388
2389 recover_immediately:
2390 return (xfs_lsn_t)-1;
2391
2392 }
2393
2394 /*
2395 * Validate the recovered buffer is of the correct type and attach the
2396 * appropriate buffer operations to them for writeback. Magic numbers are in a
2397 * few places:
2398 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2399 * the first 32 bits of the buffer (most blocks),
2400 * inside a struct xfs_da_blkinfo at the start of the buffer.
2401 */
2402 static void
2403 xlog_recover_validate_buf_type(
2404 struct xfs_mount *mp,
2405 struct xfs_buf *bp,
2406 xfs_buf_log_format_t *buf_f,
2407 xfs_lsn_t current_lsn)
2408 {
2409 struct xfs_da_blkinfo *info = bp->b_addr;
2410 uint32_t magic32;
2411 uint16_t magic16;
2412 uint16_t magicda;
2413 char *warnmsg = NULL;
2414
2415 /*
2416 * We can only do post recovery validation on items on CRC enabled
2417 * fielsystems as we need to know when the buffer was written to be able
2418 * to determine if we should have replayed the item. If we replay old
2419 * metadata over a newer buffer, then it will enter a temporarily
2420 * inconsistent state resulting in verification failures. Hence for now
2421 * just avoid the verification stage for non-crc filesystems
2422 */
2423 if (!xfs_sb_version_hascrc(&mp->m_sb))
2424 return;
2425
2426 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2427 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2428 magicda = be16_to_cpu(info->magic);
2429 switch (xfs_blft_from_flags(buf_f)) {
2430 case XFS_BLFT_BTREE_BUF:
2431 switch (magic32) {
2432 case XFS_ABTB_CRC_MAGIC:
2433 case XFS_ABTC_CRC_MAGIC:
2434 case XFS_ABTB_MAGIC:
2435 case XFS_ABTC_MAGIC:
2436 bp->b_ops = &xfs_allocbt_buf_ops;
2437 break;
2438 case XFS_IBT_CRC_MAGIC:
2439 case XFS_FIBT_CRC_MAGIC:
2440 case XFS_IBT_MAGIC:
2441 case XFS_FIBT_MAGIC:
2442 bp->b_ops = &xfs_inobt_buf_ops;
2443 break;
2444 case XFS_BMAP_CRC_MAGIC:
2445 case XFS_BMAP_MAGIC:
2446 bp->b_ops = &xfs_bmbt_buf_ops;
2447 break;
2448 case XFS_RMAP_CRC_MAGIC:
2449 bp->b_ops = &xfs_rmapbt_buf_ops;
2450 break;
2451 case XFS_REFC_CRC_MAGIC:
2452 bp->b_ops = &xfs_refcountbt_buf_ops;
2453 break;
2454 default:
2455 warnmsg = "Bad btree block magic!";
2456 break;
2457 }
2458 break;
2459 case XFS_BLFT_AGF_BUF:
2460 if (magic32 != XFS_AGF_MAGIC) {
2461 warnmsg = "Bad AGF block magic!";
2462 break;
2463 }
2464 bp->b_ops = &xfs_agf_buf_ops;
2465 break;
2466 case XFS_BLFT_AGFL_BUF:
2467 if (magic32 != XFS_AGFL_MAGIC) {
2468 warnmsg = "Bad AGFL block magic!";
2469 break;
2470 }
2471 bp->b_ops = &xfs_agfl_buf_ops;
2472 break;
2473 case XFS_BLFT_AGI_BUF:
2474 if (magic32 != XFS_AGI_MAGIC) {
2475 warnmsg = "Bad AGI block magic!";
2476 break;
2477 }
2478 bp->b_ops = &xfs_agi_buf_ops;
2479 break;
2480 case XFS_BLFT_UDQUOT_BUF:
2481 case XFS_BLFT_PDQUOT_BUF:
2482 case XFS_BLFT_GDQUOT_BUF:
2483 #ifdef CONFIG_XFS_QUOTA
2484 if (magic16 != XFS_DQUOT_MAGIC) {
2485 warnmsg = "Bad DQUOT block magic!";
2486 break;
2487 }
2488 bp->b_ops = &xfs_dquot_buf_ops;
2489 #else
2490 xfs_alert(mp,
2491 "Trying to recover dquots without QUOTA support built in!");
2492 ASSERT(0);
2493 #endif
2494 break;
2495 case XFS_BLFT_DINO_BUF:
2496 if (magic16 != XFS_DINODE_MAGIC) {
2497 warnmsg = "Bad INODE block magic!";
2498 break;
2499 }
2500 bp->b_ops = &xfs_inode_buf_ops;
2501 break;
2502 case XFS_BLFT_SYMLINK_BUF:
2503 if (magic32 != XFS_SYMLINK_MAGIC) {
2504 warnmsg = "Bad symlink block magic!";
2505 break;
2506 }
2507 bp->b_ops = &xfs_symlink_buf_ops;
2508 break;
2509 case XFS_BLFT_DIR_BLOCK_BUF:
2510 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2511 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2512 warnmsg = "Bad dir block magic!";
2513 break;
2514 }
2515 bp->b_ops = &xfs_dir3_block_buf_ops;
2516 break;
2517 case XFS_BLFT_DIR_DATA_BUF:
2518 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2519 magic32 != XFS_DIR3_DATA_MAGIC) {
2520 warnmsg = "Bad dir data magic!";
2521 break;
2522 }
2523 bp->b_ops = &xfs_dir3_data_buf_ops;
2524 break;
2525 case XFS_BLFT_DIR_FREE_BUF:
2526 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2527 magic32 != XFS_DIR3_FREE_MAGIC) {
2528 warnmsg = "Bad dir3 free magic!";
2529 break;
2530 }
2531 bp->b_ops = &xfs_dir3_free_buf_ops;
2532 break;
2533 case XFS_BLFT_DIR_LEAF1_BUF:
2534 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2535 magicda != XFS_DIR3_LEAF1_MAGIC) {
2536 warnmsg = "Bad dir leaf1 magic!";
2537 break;
2538 }
2539 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2540 break;
2541 case XFS_BLFT_DIR_LEAFN_BUF:
2542 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2543 magicda != XFS_DIR3_LEAFN_MAGIC) {
2544 warnmsg = "Bad dir leafn magic!";
2545 break;
2546 }
2547 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2548 break;
2549 case XFS_BLFT_DA_NODE_BUF:
2550 if (magicda != XFS_DA_NODE_MAGIC &&
2551 magicda != XFS_DA3_NODE_MAGIC) {
2552 warnmsg = "Bad da node magic!";
2553 break;
2554 }
2555 bp->b_ops = &xfs_da3_node_buf_ops;
2556 break;
2557 case XFS_BLFT_ATTR_LEAF_BUF:
2558 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2559 magicda != XFS_ATTR3_LEAF_MAGIC) {
2560 warnmsg = "Bad attr leaf magic!";
2561 break;
2562 }
2563 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2564 break;
2565 case XFS_BLFT_ATTR_RMT_BUF:
2566 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2567 warnmsg = "Bad attr remote magic!";
2568 break;
2569 }
2570 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2571 break;
2572 case XFS_BLFT_SB_BUF:
2573 if (magic32 != XFS_SB_MAGIC) {
2574 warnmsg = "Bad SB block magic!";
2575 break;
2576 }
2577 bp->b_ops = &xfs_sb_buf_ops;
2578 break;
2579 #ifdef CONFIG_XFS_RT
2580 case XFS_BLFT_RTBITMAP_BUF:
2581 case XFS_BLFT_RTSUMMARY_BUF:
2582 /* no magic numbers for verification of RT buffers */
2583 bp->b_ops = &xfs_rtbuf_ops;
2584 break;
2585 #endif /* CONFIG_XFS_RT */
2586 default:
2587 xfs_warn(mp, "Unknown buffer type %d!",
2588 xfs_blft_from_flags(buf_f));
2589 break;
2590 }
2591
2592 /*
2593 * Nothing else to do in the case of a NULL current LSN as this means
2594 * the buffer is more recent than the change in the log and will be
2595 * skipped.
2596 */
2597 if (current_lsn == NULLCOMMITLSN)
2598 return;
2599
2600 if (warnmsg) {
2601 xfs_warn(mp, warnmsg);
2602 ASSERT(0);
2603 }
2604
2605 /*
2606 * We must update the metadata LSN of the buffer as it is written out to
2607 * ensure that older transactions never replay over this one and corrupt
2608 * the buffer. This can occur if log recovery is interrupted at some
2609 * point after the current transaction completes, at which point a
2610 * subsequent mount starts recovery from the beginning.
2611 *
2612 * Write verifiers update the metadata LSN from log items attached to
2613 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2614 * the verifier. We'll clean it up in our ->iodone() callback.
2615 */
2616 if (bp->b_ops) {
2617 struct xfs_buf_log_item *bip;
2618
2619 ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2620 bp->b_iodone = xlog_recover_iodone;
2621 xfs_buf_item_init(bp, mp);
2622 bip = bp->b_fspriv;
2623 bip->bli_item.li_lsn = current_lsn;
2624 }
2625 }
2626
2627 /*
2628 * Perform a 'normal' buffer recovery. Each logged region of the
2629 * buffer should be copied over the corresponding region in the
2630 * given buffer. The bitmap in the buf log format structure indicates
2631 * where to place the logged data.
2632 */
2633 STATIC void
2634 xlog_recover_do_reg_buffer(
2635 struct xfs_mount *mp,
2636 xlog_recover_item_t *item,
2637 struct xfs_buf *bp,
2638 xfs_buf_log_format_t *buf_f,
2639 xfs_lsn_t current_lsn)
2640 {
2641 int i;
2642 int bit;
2643 int nbits;
2644 int error;
2645
2646 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2647
2648 bit = 0;
2649 i = 1; /* 0 is the buf format structure */
2650 while (1) {
2651 bit = xfs_next_bit(buf_f->blf_data_map,
2652 buf_f->blf_map_size, bit);
2653 if (bit == -1)
2654 break;
2655 nbits = xfs_contig_bits(buf_f->blf_data_map,
2656 buf_f->blf_map_size, bit);
2657 ASSERT(nbits > 0);
2658 ASSERT(item->ri_buf[i].i_addr != NULL);
2659 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2660 ASSERT(BBTOB(bp->b_io_length) >=
2661 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2662
2663 /*
2664 * The dirty regions logged in the buffer, even though
2665 * contiguous, may span multiple chunks. This is because the
2666 * dirty region may span a physical page boundary in a buffer
2667 * and hence be split into two separate vectors for writing into
2668 * the log. Hence we need to trim nbits back to the length of
2669 * the current region being copied out of the log.
2670 */
2671 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2672 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2673
2674 /*
2675 * Do a sanity check if this is a dquot buffer. Just checking
2676 * the first dquot in the buffer should do. XXXThis is
2677 * probably a good thing to do for other buf types also.
2678 */
2679 error = 0;
2680 if (buf_f->blf_flags &
2681 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2682 if (item->ri_buf[i].i_addr == NULL) {
2683 xfs_alert(mp,
2684 "XFS: NULL dquot in %s.", __func__);
2685 goto next;
2686 }
2687 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2688 xfs_alert(mp,
2689 "XFS: dquot too small (%d) in %s.",
2690 item->ri_buf[i].i_len, __func__);
2691 goto next;
2692 }
2693 error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2694 -1, 0, XFS_QMOPT_DOWARN,
2695 "dquot_buf_recover");
2696 if (error)
2697 goto next;
2698 }
2699
2700 memcpy(xfs_buf_offset(bp,
2701 (uint)bit << XFS_BLF_SHIFT), /* dest */
2702 item->ri_buf[i].i_addr, /* source */
2703 nbits<<XFS_BLF_SHIFT); /* length */
2704 next:
2705 i++;
2706 bit += nbits;
2707 }
2708
2709 /* Shouldn't be any more regions */
2710 ASSERT(i == item->ri_total);
2711
2712 xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2713 }
2714
2715 /*
2716 * Perform a dquot buffer recovery.
2717 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2718 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2719 * Else, treat it as a regular buffer and do recovery.
2720 *
2721 * Return false if the buffer was tossed and true if we recovered the buffer to
2722 * indicate to the caller if the buffer needs writing.
2723 */
2724 STATIC bool
2725 xlog_recover_do_dquot_buffer(
2726 struct xfs_mount *mp,
2727 struct xlog *log,
2728 struct xlog_recover_item *item,
2729 struct xfs_buf *bp,
2730 struct xfs_buf_log_format *buf_f)
2731 {
2732 uint type;
2733
2734 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2735
2736 /*
2737 * Filesystems are required to send in quota flags at mount time.
2738 */
2739 if (!mp->m_qflags)
2740 return false;
2741
2742 type = 0;
2743 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2744 type |= XFS_DQ_USER;
2745 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2746 type |= XFS_DQ_PROJ;
2747 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2748 type |= XFS_DQ_GROUP;
2749 /*
2750 * This type of quotas was turned off, so ignore this buffer
2751 */
2752 if (log->l_quotaoffs_flag & type)
2753 return false;
2754
2755 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2756 return true;
2757 }
2758
2759 /*
2760 * This routine replays a modification made to a buffer at runtime.
2761 * There are actually two types of buffer, regular and inode, which
2762 * are handled differently. Inode buffers are handled differently
2763 * in that we only recover a specific set of data from them, namely
2764 * the inode di_next_unlinked fields. This is because all other inode
2765 * data is actually logged via inode records and any data we replay
2766 * here which overlaps that may be stale.
2767 *
2768 * When meta-data buffers are freed at run time we log a buffer item
2769 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2770 * of the buffer in the log should not be replayed at recovery time.
2771 * This is so that if the blocks covered by the buffer are reused for
2772 * file data before we crash we don't end up replaying old, freed
2773 * meta-data into a user's file.
2774 *
2775 * To handle the cancellation of buffer log items, we make two passes
2776 * over the log during recovery. During the first we build a table of
2777 * those buffers which have been cancelled, and during the second we
2778 * only replay those buffers which do not have corresponding cancel
2779 * records in the table. See xlog_recover_buffer_pass[1,2] above
2780 * for more details on the implementation of the table of cancel records.
2781 */
2782 STATIC int
2783 xlog_recover_buffer_pass2(
2784 struct xlog *log,
2785 struct list_head *buffer_list,
2786 struct xlog_recover_item *item,
2787 xfs_lsn_t current_lsn)
2788 {
2789 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2790 xfs_mount_t *mp = log->l_mp;
2791 xfs_buf_t *bp;
2792 int error;
2793 uint buf_flags;
2794 xfs_lsn_t lsn;
2795
2796 /*
2797 * In this pass we only want to recover all the buffers which have
2798 * not been cancelled and are not cancellation buffers themselves.
2799 */
2800 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2801 buf_f->blf_len, buf_f->blf_flags)) {
2802 trace_xfs_log_recover_buf_cancel(log, buf_f);
2803 return 0;
2804 }
2805
2806 trace_xfs_log_recover_buf_recover(log, buf_f);
2807
2808 buf_flags = 0;
2809 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2810 buf_flags |= XBF_UNMAPPED;
2811
2812 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2813 buf_flags, NULL);
2814 if (!bp)
2815 return -ENOMEM;
2816 error = bp->b_error;
2817 if (error) {
2818 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2819 goto out_release;
2820 }
2821
2822 /*
2823 * Recover the buffer only if we get an LSN from it and it's less than
2824 * the lsn of the transaction we are replaying.
2825 *
2826 * Note that we have to be extremely careful of readahead here.
2827 * Readahead does not attach verfiers to the buffers so if we don't
2828 * actually do any replay after readahead because of the LSN we found
2829 * in the buffer if more recent than that current transaction then we
2830 * need to attach the verifier directly. Failure to do so can lead to
2831 * future recovery actions (e.g. EFI and unlinked list recovery) can
2832 * operate on the buffers and they won't get the verifier attached. This
2833 * can lead to blocks on disk having the correct content but a stale
2834 * CRC.
2835 *
2836 * It is safe to assume these clean buffers are currently up to date.
2837 * If the buffer is dirtied by a later transaction being replayed, then
2838 * the verifier will be reset to match whatever recover turns that
2839 * buffer into.
2840 */
2841 lsn = xlog_recover_get_buf_lsn(mp, bp);
2842 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2843 trace_xfs_log_recover_buf_skip(log, buf_f);
2844 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2845 goto out_release;
2846 }
2847
2848 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2849 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2850 if (error)
2851 goto out_release;
2852 } else if (buf_f->blf_flags &
2853 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2854 bool dirty;
2855
2856 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2857 if (!dirty)
2858 goto out_release;
2859 } else {
2860 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2861 }
2862
2863 /*
2864 * Perform delayed write on the buffer. Asynchronous writes will be
2865 * slower when taking into account all the buffers to be flushed.
2866 *
2867 * Also make sure that only inode buffers with good sizes stay in
2868 * the buffer cache. The kernel moves inodes in buffers of 1 block
2869 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode
2870 * buffers in the log can be a different size if the log was generated
2871 * by an older kernel using unclustered inode buffers or a newer kernel
2872 * running with a different inode cluster size. Regardless, if the
2873 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2874 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2875 * the buffer out of the buffer cache so that the buffer won't
2876 * overlap with future reads of those inodes.
2877 */
2878 if (XFS_DINODE_MAGIC ==
2879 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2880 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2881 (uint32_t)log->l_mp->m_inode_cluster_size))) {
2882 xfs_buf_stale(bp);
2883 error = xfs_bwrite(bp);
2884 } else {
2885 ASSERT(bp->b_target->bt_mount == mp);
2886 bp->b_iodone = xlog_recover_iodone;
2887 xfs_buf_delwri_queue(bp, buffer_list);
2888 }
2889
2890 out_release:
2891 xfs_buf_relse(bp);
2892 return error;
2893 }
2894
2895 /*
2896 * Inode fork owner changes
2897 *
2898 * If we have been told that we have to reparent the inode fork, it's because an
2899 * extent swap operation on a CRC enabled filesystem has been done and we are
2900 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2901 * owners of it.
2902 *
2903 * The complexity here is that we don't have an inode context to work with, so
2904 * after we've replayed the inode we need to instantiate one. This is where the
2905 * fun begins.
2906 *
2907 * We are in the middle of log recovery, so we can't run transactions. That
2908 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2909 * that will result in the corresponding iput() running the inode through
2910 * xfs_inactive(). If we've just replayed an inode core that changes the link
2911 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2912 * transactions (bad!).
2913 *
2914 * So, to avoid this, we instantiate an inode directly from the inode core we've
2915 * just recovered. We have the buffer still locked, and all we really need to
2916 * instantiate is the inode core and the forks being modified. We can do this
2917 * manually, then run the inode btree owner change, and then tear down the
2918 * xfs_inode without having to run any transactions at all.
2919 *
2920 * Also, because we don't have a transaction context available here but need to
2921 * gather all the buffers we modify for writeback so we pass the buffer_list
2922 * instead for the operation to use.
2923 */
2924
2925 STATIC int
2926 xfs_recover_inode_owner_change(
2927 struct xfs_mount *mp,
2928 struct xfs_dinode *dip,
2929 struct xfs_inode_log_format *in_f,
2930 struct list_head *buffer_list)
2931 {
2932 struct xfs_inode *ip;
2933 int error;
2934
2935 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2936
2937 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2938 if (!ip)
2939 return -ENOMEM;
2940
2941 /* instantiate the inode */
2942 xfs_inode_from_disk(ip, dip);
2943 ASSERT(ip->i_d.di_version >= 3);
2944
2945 error = xfs_iformat_fork(ip, dip);
2946 if (error)
2947 goto out_free_ip;
2948
2949
2950 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2951 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2952 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2953 ip->i_ino, buffer_list);
2954 if (error)
2955 goto out_free_ip;
2956 }
2957
2958 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2959 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2960 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2961 ip->i_ino, buffer_list);
2962 if (error)
2963 goto out_free_ip;
2964 }
2965
2966 out_free_ip:
2967 xfs_inode_free(ip);
2968 return error;
2969 }
2970
2971 STATIC int
2972 xlog_recover_inode_pass2(
2973 struct xlog *log,
2974 struct list_head *buffer_list,
2975 struct xlog_recover_item *item,
2976 xfs_lsn_t current_lsn)
2977 {
2978 xfs_inode_log_format_t *in_f;
2979 xfs_mount_t *mp = log->l_mp;
2980 xfs_buf_t *bp;
2981 xfs_dinode_t *dip;
2982 int len;
2983 char *src;
2984 char *dest;
2985 int error;
2986 int attr_index;
2987 uint fields;
2988 struct xfs_log_dinode *ldip;
2989 uint isize;
2990 int need_free = 0;
2991
2992 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2993 in_f = item->ri_buf[0].i_addr;
2994 } else {
2995 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2996 need_free = 1;
2997 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2998 if (error)
2999 goto error;
3000 }
3001
3002 /*
3003 * Inode buffers can be freed, look out for it,
3004 * and do not replay the inode.
3005 */
3006 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
3007 in_f->ilf_len, 0)) {
3008 error = 0;
3009 trace_xfs_log_recover_inode_cancel(log, in_f);
3010 goto error;
3011 }
3012 trace_xfs_log_recover_inode_recover(log, in_f);
3013
3014 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
3015 &xfs_inode_buf_ops);
3016 if (!bp) {
3017 error = -ENOMEM;
3018 goto error;
3019 }
3020 error = bp->b_error;
3021 if (error) {
3022 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
3023 goto out_release;
3024 }
3025 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
3026 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
3027
3028 /*
3029 * Make sure the place we're flushing out to really looks
3030 * like an inode!
3031 */
3032 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
3033 xfs_alert(mp,
3034 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
3035 __func__, dip, bp, in_f->ilf_ino);
3036 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
3037 XFS_ERRLEVEL_LOW, mp);
3038 error = -EFSCORRUPTED;
3039 goto out_release;
3040 }
3041 ldip = item->ri_buf[1].i_addr;
3042 if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
3043 xfs_alert(mp,
3044 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
3045 __func__, item, in_f->ilf_ino);
3046 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
3047 XFS_ERRLEVEL_LOW, mp);
3048 error = -EFSCORRUPTED;
3049 goto out_release;
3050 }
3051
3052 /*
3053 * If the inode has an LSN in it, recover the inode only if it's less
3054 * than the lsn of the transaction we are replaying. Note: we still
3055 * need to replay an owner change even though the inode is more recent
3056 * than the transaction as there is no guarantee that all the btree
3057 * blocks are more recent than this transaction, too.
3058 */
3059 if (dip->di_version >= 3) {
3060 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
3061
3062 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3063 trace_xfs_log_recover_inode_skip(log, in_f);
3064 error = 0;
3065 goto out_owner_change;
3066 }
3067 }
3068
3069 /*
3070 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3071 * are transactional and if ordering is necessary we can determine that
3072 * more accurately by the LSN field in the V3 inode core. Don't trust
3073 * the inode versions we might be changing them here - use the
3074 * superblock flag to determine whether we need to look at di_flushiter
3075 * to skip replay when the on disk inode is newer than the log one
3076 */
3077 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3078 ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3079 /*
3080 * Deal with the wrap case, DI_MAX_FLUSH is less
3081 * than smaller numbers
3082 */
3083 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3084 ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3085 /* do nothing */
3086 } else {
3087 trace_xfs_log_recover_inode_skip(log, in_f);
3088 error = 0;
3089 goto out_release;
3090 }
3091 }
3092
3093 /* Take the opportunity to reset the flush iteration count */
3094 ldip->di_flushiter = 0;
3095
3096 if (unlikely(S_ISREG(ldip->di_mode))) {
3097 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3098 (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3099 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3100 XFS_ERRLEVEL_LOW, mp, ldip);
3101 xfs_alert(mp,
3102 "%s: Bad regular inode log record, rec ptr 0x%p, "
3103 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3104 __func__, item, dip, bp, in_f->ilf_ino);
3105 error = -EFSCORRUPTED;
3106 goto out_release;
3107 }
3108 } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3109 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3110 (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3111 (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3112 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3113 XFS_ERRLEVEL_LOW, mp, ldip);
3114 xfs_alert(mp,
3115 "%s: Bad dir inode log record, rec ptr 0x%p, "
3116 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3117 __func__, item, dip, bp, in_f->ilf_ino);
3118 error = -EFSCORRUPTED;
3119 goto out_release;
3120 }
3121 }
3122 if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3123 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3124 XFS_ERRLEVEL_LOW, mp, ldip);
3125 xfs_alert(mp,
3126 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3127 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
3128 __func__, item, dip, bp, in_f->ilf_ino,
3129 ldip->di_nextents + ldip->di_anextents,
3130 ldip->di_nblocks);
3131 error = -EFSCORRUPTED;
3132 goto out_release;
3133 }
3134 if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3135 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3136 XFS_ERRLEVEL_LOW, mp, ldip);
3137 xfs_alert(mp,
3138 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3139 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
3140 item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3141 error = -EFSCORRUPTED;
3142 goto out_release;
3143 }
3144 isize = xfs_log_dinode_size(ldip->di_version);
3145 if (unlikely(item->ri_buf[1].i_len > isize)) {
3146 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3147 XFS_ERRLEVEL_LOW, mp, ldip);
3148 xfs_alert(mp,
3149 "%s: Bad inode log record length %d, rec ptr 0x%p",
3150 __func__, item->ri_buf[1].i_len, item);
3151 error = -EFSCORRUPTED;
3152 goto out_release;
3153 }
3154
3155 /* recover the log dinode inode into the on disk inode */
3156 xfs_log_dinode_to_disk(ldip, dip);
3157
3158 /* the rest is in on-disk format */
3159 if (item->ri_buf[1].i_len > isize) {
3160 memcpy((char *)dip + isize,
3161 item->ri_buf[1].i_addr + isize,
3162 item->ri_buf[1].i_len - isize);
3163 }
3164
3165 fields = in_f->ilf_fields;
3166 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
3167 case XFS_ILOG_DEV:
3168 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3169 break;
3170 case XFS_ILOG_UUID:
3171 memcpy(XFS_DFORK_DPTR(dip),
3172 &in_f->ilf_u.ilfu_uuid,
3173 sizeof(uuid_t));
3174 break;
3175 }
3176
3177 if (in_f->ilf_size == 2)
3178 goto out_owner_change;
3179 len = item->ri_buf[2].i_len;
3180 src = item->ri_buf[2].i_addr;
3181 ASSERT(in_f->ilf_size <= 4);
3182 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3183 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3184 (len == in_f->ilf_dsize));
3185
3186 switch (fields & XFS_ILOG_DFORK) {
3187 case XFS_ILOG_DDATA:
3188 case XFS_ILOG_DEXT:
3189 memcpy(XFS_DFORK_DPTR(dip), src, len);
3190 break;
3191
3192 case XFS_ILOG_DBROOT:
3193 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3194 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3195 XFS_DFORK_DSIZE(dip, mp));
3196 break;
3197
3198 default:
3199 /*
3200 * There are no data fork flags set.
3201 */
3202 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3203 break;
3204 }
3205
3206 /*
3207 * If we logged any attribute data, recover it. There may or
3208 * may not have been any other non-core data logged in this
3209 * transaction.
3210 */
3211 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3212 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3213 attr_index = 3;
3214 } else {
3215 attr_index = 2;
3216 }
3217 len = item->ri_buf[attr_index].i_len;
3218 src = item->ri_buf[attr_index].i_addr;
3219 ASSERT(len == in_f->ilf_asize);
3220
3221 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3222 case XFS_ILOG_ADATA:
3223 case XFS_ILOG_AEXT:
3224 dest = XFS_DFORK_APTR(dip);
3225 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3226 memcpy(dest, src, len);
3227 break;
3228
3229 case XFS_ILOG_ABROOT:
3230 dest = XFS_DFORK_APTR(dip);
3231 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3232 len, (xfs_bmdr_block_t*)dest,
3233 XFS_DFORK_ASIZE(dip, mp));
3234 break;
3235
3236 default:
3237 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3238 ASSERT(0);
3239 error = -EIO;
3240 goto out_release;
3241 }
3242 }
3243
3244 out_owner_change:
3245 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3246 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3247 buffer_list);
3248 /* re-generate the checksum. */
3249 xfs_dinode_calc_crc(log->l_mp, dip);
3250
3251 ASSERT(bp->b_target->bt_mount == mp);
3252 bp->b_iodone = xlog_recover_iodone;
3253 xfs_buf_delwri_queue(bp, buffer_list);
3254
3255 out_release:
3256 xfs_buf_relse(bp);
3257 error:
3258 if (need_free)
3259 kmem_free(in_f);
3260 return error;
3261 }
3262
3263 /*
3264 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3265 * structure, so that we know not to do any dquot item or dquot buffer recovery,
3266 * of that type.
3267 */
3268 STATIC int
3269 xlog_recover_quotaoff_pass1(
3270 struct xlog *log,
3271 struct xlog_recover_item *item)
3272 {
3273 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3274 ASSERT(qoff_f);
3275
3276 /*
3277 * The logitem format's flag tells us if this was user quotaoff,
3278 * group/project quotaoff or both.
3279 */
3280 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3281 log->l_quotaoffs_flag |= XFS_DQ_USER;
3282 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3283 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3284 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3285 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3286
3287 return 0;
3288 }
3289
3290 /*
3291 * Recover a dquot record
3292 */
3293 STATIC int
3294 xlog_recover_dquot_pass2(
3295 struct xlog *log,
3296 struct list_head *buffer_list,
3297 struct xlog_recover_item *item,
3298 xfs_lsn_t current_lsn)
3299 {
3300 xfs_mount_t *mp = log->l_mp;
3301 xfs_buf_t *bp;
3302 struct xfs_disk_dquot *ddq, *recddq;
3303 int error;
3304 xfs_dq_logformat_t *dq_f;
3305 uint type;
3306
3307
3308 /*
3309 * Filesystems are required to send in quota flags at mount time.
3310 */
3311 if (mp->m_qflags == 0)
3312 return 0;
3313
3314 recddq = item->ri_buf[1].i_addr;
3315 if (recddq == NULL) {
3316 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3317 return -EIO;
3318 }
3319 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3320 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3321 item->ri_buf[1].i_len, __func__);
3322 return -EIO;
3323 }
3324
3325 /*
3326 * This type of quotas was turned off, so ignore this record.
3327 */
3328 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3329 ASSERT(type);
3330 if (log->l_quotaoffs_flag & type)
3331 return 0;
3332
3333 /*
3334 * At this point we know that quota was _not_ turned off.
3335 * Since the mount flags are not indicating to us otherwise, this
3336 * must mean that quota is on, and the dquot needs to be replayed.
3337 * Remember that we may not have fully recovered the superblock yet,
3338 * so we can't do the usual trick of looking at the SB quota bits.
3339 *
3340 * The other possibility, of course, is that the quota subsystem was
3341 * removed since the last mount - ENOSYS.
3342 */
3343 dq_f = item->ri_buf[0].i_addr;
3344 ASSERT(dq_f);
3345 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3346 "xlog_recover_dquot_pass2 (log copy)");
3347 if (error)
3348 return -EIO;
3349 ASSERT(dq_f->qlf_len == 1);
3350
3351 /*
3352 * At this point we are assuming that the dquots have been allocated
3353 * and hence the buffer has valid dquots stamped in it. It should,
3354 * therefore, pass verifier validation. If the dquot is bad, then the
3355 * we'll return an error here, so we don't need to specifically check
3356 * the dquot in the buffer after the verifier has run.
3357 */
3358 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3359 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3360 &xfs_dquot_buf_ops);
3361 if (error)
3362 return error;
3363
3364 ASSERT(bp);
3365 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3366
3367 /*
3368 * If the dquot has an LSN in it, recover the dquot only if it's less
3369 * than the lsn of the transaction we are replaying.
3370 */
3371 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3372 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3373 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3374
3375 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3376 goto out_release;
3377 }
3378 }
3379
3380 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3381 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3382 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3383 XFS_DQUOT_CRC_OFF);
3384 }
3385
3386 ASSERT(dq_f->qlf_size == 2);
3387 ASSERT(bp->b_target->bt_mount == mp);
3388 bp->b_iodone = xlog_recover_iodone;
3389 xfs_buf_delwri_queue(bp, buffer_list);
3390
3391 out_release:
3392 xfs_buf_relse(bp);
3393 return 0;
3394 }
3395
3396 /*
3397 * This routine is called to create an in-core extent free intent
3398 * item from the efi format structure which was logged on disk.
3399 * It allocates an in-core efi, copies the extents from the format
3400 * structure into it, and adds the efi to the AIL with the given
3401 * LSN.
3402 */
3403 STATIC int
3404 xlog_recover_efi_pass2(
3405 struct xlog *log,
3406 struct xlog_recover_item *item,
3407 xfs_lsn_t lsn)
3408 {
3409 int error;
3410 struct xfs_mount *mp = log->l_mp;
3411 struct xfs_efi_log_item *efip;
3412 struct xfs_efi_log_format *efi_formatp;
3413
3414 efi_formatp = item->ri_buf[0].i_addr;
3415
3416 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3417 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3418 if (error) {
3419 xfs_efi_item_free(efip);
3420 return error;
3421 }
3422 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3423
3424 spin_lock(&log->l_ailp->xa_lock);
3425 /*
3426 * The EFI has two references. One for the EFD and one for EFI to ensure
3427 * it makes it into the AIL. Insert the EFI into the AIL directly and
3428 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3429 * AIL lock.
3430 */
3431 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3432 xfs_efi_release(efip);
3433 return 0;
3434 }
3435
3436
3437 /*
3438 * This routine is called when an EFD format structure is found in a committed
3439 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3440 * was still in the log. To do this it searches the AIL for the EFI with an id
3441 * equal to that in the EFD format structure. If we find it we drop the EFD
3442 * reference, which removes the EFI from the AIL and frees it.
3443 */
3444 STATIC int
3445 xlog_recover_efd_pass2(
3446 struct xlog *log,
3447 struct xlog_recover_item *item)
3448 {
3449 xfs_efd_log_format_t *efd_formatp;
3450 xfs_efi_log_item_t *efip = NULL;
3451 xfs_log_item_t *lip;
3452 uint64_t efi_id;
3453 struct xfs_ail_cursor cur;
3454 struct xfs_ail *ailp = log->l_ailp;
3455
3456 efd_formatp = item->ri_buf[0].i_addr;
3457 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3458 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3459 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3460 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3461 efi_id = efd_formatp->efd_efi_id;
3462
3463 /*
3464 * Search for the EFI with the id in the EFD format structure in the
3465 * AIL.
3466 */
3467 spin_lock(&ailp->xa_lock);
3468 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3469 while (lip != NULL) {
3470 if (lip->li_type == XFS_LI_EFI) {
3471 efip = (xfs_efi_log_item_t *)lip;
3472 if (efip->efi_format.efi_id == efi_id) {
3473 /*
3474 * Drop the EFD reference to the EFI. This
3475 * removes the EFI from the AIL and frees it.
3476 */
3477 spin_unlock(&ailp->xa_lock);
3478 xfs_efi_release(efip);
3479 spin_lock(&ailp->xa_lock);
3480 break;
3481 }
3482 }
3483 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3484 }
3485
3486 xfs_trans_ail_cursor_done(&cur);
3487 spin_unlock(&ailp->xa_lock);
3488
3489 return 0;
3490 }
3491
3492 /*
3493 * This routine is called to create an in-core extent rmap update
3494 * item from the rui format structure which was logged on disk.
3495 * It allocates an in-core rui, copies the extents from the format
3496 * structure into it, and adds the rui to the AIL with the given
3497 * LSN.
3498 */
3499 STATIC int
3500 xlog_recover_rui_pass2(
3501 struct xlog *log,
3502 struct xlog_recover_item *item,
3503 xfs_lsn_t lsn)
3504 {
3505 int error;
3506 struct xfs_mount *mp = log->l_mp;
3507 struct xfs_rui_log_item *ruip;
3508 struct xfs_rui_log_format *rui_formatp;
3509
3510 rui_formatp = item->ri_buf[0].i_addr;
3511
3512 ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3513 error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3514 if (error) {
3515 xfs_rui_item_free(ruip);
3516 return error;
3517 }
3518 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3519
3520 spin_lock(&log->l_ailp->xa_lock);
3521 /*
3522 * The RUI has two references. One for the RUD and one for RUI to ensure
3523 * it makes it into the AIL. Insert the RUI into the AIL directly and
3524 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3525 * AIL lock.
3526 */
3527 xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3528 xfs_rui_release(ruip);
3529 return 0;
3530 }
3531
3532
3533 /*
3534 * This routine is called when an RUD format structure is found in a committed
3535 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3536 * was still in the log. To do this it searches the AIL for the RUI with an id
3537 * equal to that in the RUD format structure. If we find it we drop the RUD
3538 * reference, which removes the RUI from the AIL and frees it.
3539 */
3540 STATIC int
3541 xlog_recover_rud_pass2(
3542 struct xlog *log,
3543 struct xlog_recover_item *item)
3544 {
3545 struct xfs_rud_log_format *rud_formatp;
3546 struct xfs_rui_log_item *ruip = NULL;
3547 struct xfs_log_item *lip;
3548 uint64_t rui_id;
3549 struct xfs_ail_cursor cur;
3550 struct xfs_ail *ailp = log->l_ailp;
3551
3552 rud_formatp = item->ri_buf[0].i_addr;
3553 ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3554 rui_id = rud_formatp->rud_rui_id;
3555
3556 /*
3557 * Search for the RUI with the id in the RUD format structure in the
3558 * AIL.
3559 */
3560 spin_lock(&ailp->xa_lock);
3561 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3562 while (lip != NULL) {
3563 if (lip->li_type == XFS_LI_RUI) {
3564 ruip = (struct xfs_rui_log_item *)lip;
3565 if (ruip->rui_format.rui_id == rui_id) {
3566 /*
3567 * Drop the RUD reference to the RUI. This
3568 * removes the RUI from the AIL and frees it.
3569 */
3570 spin_unlock(&ailp->xa_lock);
3571 xfs_rui_release(ruip);
3572 spin_lock(&ailp->xa_lock);
3573 break;
3574 }
3575 }
3576 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3577 }
3578
3579 xfs_trans_ail_cursor_done(&cur);
3580 spin_unlock(&ailp->xa_lock);
3581
3582 return 0;
3583 }
3584
3585 /*
3586 * Copy an CUI format buffer from the given buf, and into the destination
3587 * CUI format structure. The CUI/CUD items were designed not to need any
3588 * special alignment handling.
3589 */
3590 static int
3591 xfs_cui_copy_format(
3592 struct xfs_log_iovec *buf,
3593 struct xfs_cui_log_format *dst_cui_fmt)
3594 {
3595 struct xfs_cui_log_format *src_cui_fmt;
3596 uint len;
3597
3598 src_cui_fmt = buf->i_addr;
3599 len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3600
3601 if (buf->i_len == len) {
3602 memcpy(dst_cui_fmt, src_cui_fmt, len);
3603 return 0;
3604 }
3605 return -EFSCORRUPTED;
3606 }
3607
3608 /*
3609 * This routine is called to create an in-core extent refcount update
3610 * item from the cui format structure which was logged on disk.
3611 * It allocates an in-core cui, copies the extents from the format
3612 * structure into it, and adds the cui to the AIL with the given
3613 * LSN.
3614 */
3615 STATIC int
3616 xlog_recover_cui_pass2(
3617 struct xlog *log,
3618 struct xlog_recover_item *item,
3619 xfs_lsn_t lsn)
3620 {
3621 int error;
3622 struct xfs_mount *mp = log->l_mp;
3623 struct xfs_cui_log_item *cuip;
3624 struct xfs_cui_log_format *cui_formatp;
3625
3626 cui_formatp = item->ri_buf[0].i_addr;
3627
3628 cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3629 error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3630 if (error) {
3631 xfs_cui_item_free(cuip);
3632 return error;
3633 }
3634 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3635
3636 spin_lock(&log->l_ailp->xa_lock);
3637 /*
3638 * The CUI has two references. One for the CUD and one for CUI to ensure
3639 * it makes it into the AIL. Insert the CUI into the AIL directly and
3640 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3641 * AIL lock.
3642 */
3643 xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3644 xfs_cui_release(cuip);
3645 return 0;
3646 }
3647
3648
3649 /*
3650 * This routine is called when an CUD format structure is found in a committed
3651 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3652 * was still in the log. To do this it searches the AIL for the CUI with an id
3653 * equal to that in the CUD format structure. If we find it we drop the CUD
3654 * reference, which removes the CUI from the AIL and frees it.
3655 */
3656 STATIC int
3657 xlog_recover_cud_pass2(
3658 struct xlog *log,
3659 struct xlog_recover_item *item)
3660 {
3661 struct xfs_cud_log_format *cud_formatp;
3662 struct xfs_cui_log_item *cuip = NULL;
3663 struct xfs_log_item *lip;
3664 uint64_t cui_id;
3665 struct xfs_ail_cursor cur;
3666 struct xfs_ail *ailp = log->l_ailp;
3667
3668 cud_formatp = item->ri_buf[0].i_addr;
3669 if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3670 return -EFSCORRUPTED;
3671 cui_id = cud_formatp->cud_cui_id;
3672
3673 /*
3674 * Search for the CUI with the id in the CUD format structure in the
3675 * AIL.
3676 */
3677 spin_lock(&ailp->xa_lock);
3678 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3679 while (lip != NULL) {
3680 if (lip->li_type == XFS_LI_CUI) {
3681 cuip = (struct xfs_cui_log_item *)lip;
3682 if (cuip->cui_format.cui_id == cui_id) {
3683 /*
3684 * Drop the CUD reference to the CUI. This
3685 * removes the CUI from the AIL and frees it.
3686 */
3687 spin_unlock(&ailp->xa_lock);
3688 xfs_cui_release(cuip);
3689 spin_lock(&ailp->xa_lock);
3690 break;
3691 }
3692 }
3693 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3694 }
3695
3696 xfs_trans_ail_cursor_done(&cur);
3697 spin_unlock(&ailp->xa_lock);
3698
3699 return 0;
3700 }
3701
3702 /*
3703 * Copy an BUI format buffer from the given buf, and into the destination
3704 * BUI format structure. The BUI/BUD items were designed not to need any
3705 * special alignment handling.
3706 */
3707 static int
3708 xfs_bui_copy_format(
3709 struct xfs_log_iovec *buf,
3710 struct xfs_bui_log_format *dst_bui_fmt)
3711 {
3712 struct xfs_bui_log_format *src_bui_fmt;
3713 uint len;
3714
3715 src_bui_fmt = buf->i_addr;
3716 len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3717
3718 if (buf->i_len == len) {
3719 memcpy(dst_bui_fmt, src_bui_fmt, len);
3720 return 0;
3721 }
3722 return -EFSCORRUPTED;
3723 }
3724
3725 /*
3726 * This routine is called to create an in-core extent bmap update
3727 * item from the bui format structure which was logged on disk.
3728 * It allocates an in-core bui, copies the extents from the format
3729 * structure into it, and adds the bui to the AIL with the given
3730 * LSN.
3731 */
3732 STATIC int
3733 xlog_recover_bui_pass2(
3734 struct xlog *log,
3735 struct xlog_recover_item *item,
3736 xfs_lsn_t lsn)
3737 {
3738 int error;
3739 struct xfs_mount *mp = log->l_mp;
3740 struct xfs_bui_log_item *buip;
3741 struct xfs_bui_log_format *bui_formatp;
3742
3743 bui_formatp = item->ri_buf[0].i_addr;
3744
3745 if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3746 return -EFSCORRUPTED;
3747 buip = xfs_bui_init(mp);
3748 error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3749 if (error) {
3750 xfs_bui_item_free(buip);
3751 return error;
3752 }
3753 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3754
3755 spin_lock(&log->l_ailp->xa_lock);
3756 /*
3757 * The RUI has two references. One for the RUD and one for RUI to ensure
3758 * it makes it into the AIL. Insert the RUI into the AIL directly and
3759 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3760 * AIL lock.
3761 */
3762 xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3763 xfs_bui_release(buip);
3764 return 0;
3765 }
3766
3767
3768 /*
3769 * This routine is called when an BUD format structure is found in a committed
3770 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3771 * was still in the log. To do this it searches the AIL for the BUI with an id
3772 * equal to that in the BUD format structure. If we find it we drop the BUD
3773 * reference, which removes the BUI from the AIL and frees it.
3774 */
3775 STATIC int
3776 xlog_recover_bud_pass2(
3777 struct xlog *log,
3778 struct xlog_recover_item *item)
3779 {
3780 struct xfs_bud_log_format *bud_formatp;
3781 struct xfs_bui_log_item *buip = NULL;
3782 struct xfs_log_item *lip;
3783 uint64_t bui_id;
3784 struct xfs_ail_cursor cur;
3785 struct xfs_ail *ailp = log->l_ailp;
3786
3787 bud_formatp = item->ri_buf[0].i_addr;
3788 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3789 return -EFSCORRUPTED;
3790 bui_id = bud_formatp->bud_bui_id;
3791
3792 /*
3793 * Search for the BUI with the id in the BUD format structure in the
3794 * AIL.
3795 */
3796 spin_lock(&ailp->xa_lock);
3797 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3798 while (lip != NULL) {
3799 if (lip->li_type == XFS_LI_BUI) {
3800 buip = (struct xfs_bui_log_item *)lip;
3801 if (buip->bui_format.bui_id == bui_id) {
3802 /*
3803 * Drop the BUD reference to the BUI. This
3804 * removes the BUI from the AIL and frees it.
3805 */
3806 spin_unlock(&ailp->xa_lock);
3807 xfs_bui_release(buip);
3808 spin_lock(&ailp->xa_lock);
3809 break;
3810 }
3811 }
3812 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3813 }
3814
3815 xfs_trans_ail_cursor_done(&cur);
3816 spin_unlock(&ailp->xa_lock);
3817
3818 return 0;
3819 }
3820
3821 /*
3822 * This routine is called when an inode create format structure is found in a
3823 * committed transaction in the log. It's purpose is to initialise the inodes
3824 * being allocated on disk. This requires us to get inode cluster buffers that
3825 * match the range to be initialised, stamped with inode templates and written
3826 * by delayed write so that subsequent modifications will hit the cached buffer
3827 * and only need writing out at the end of recovery.
3828 */
3829 STATIC int
3830 xlog_recover_do_icreate_pass2(
3831 struct xlog *log,
3832 struct list_head *buffer_list,
3833 xlog_recover_item_t *item)
3834 {
3835 struct xfs_mount *mp = log->l_mp;
3836 struct xfs_icreate_log *icl;
3837 xfs_agnumber_t agno;
3838 xfs_agblock_t agbno;
3839 unsigned int count;
3840 unsigned int isize;
3841 xfs_agblock_t length;
3842 int blks_per_cluster;
3843 int bb_per_cluster;
3844 int cancel_count;
3845 int nbufs;
3846 int i;
3847
3848 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3849 if (icl->icl_type != XFS_LI_ICREATE) {
3850 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3851 return -EINVAL;
3852 }
3853
3854 if (icl->icl_size != 1) {
3855 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3856 return -EINVAL;
3857 }
3858
3859 agno = be32_to_cpu(icl->icl_ag);
3860 if (agno >= mp->m_sb.sb_agcount) {
3861 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3862 return -EINVAL;
3863 }
3864 agbno = be32_to_cpu(icl->icl_agbno);
3865 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3866 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3867 return -EINVAL;
3868 }
3869 isize = be32_to_cpu(icl->icl_isize);
3870 if (isize != mp->m_sb.sb_inodesize) {
3871 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3872 return -EINVAL;
3873 }
3874 count = be32_to_cpu(icl->icl_count);
3875 if (!count) {
3876 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3877 return -EINVAL;
3878 }
3879 length = be32_to_cpu(icl->icl_length);
3880 if (!length || length >= mp->m_sb.sb_agblocks) {
3881 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3882 return -EINVAL;
3883 }
3884
3885 /*
3886 * The inode chunk is either full or sparse and we only support
3887 * m_ialloc_min_blks sized sparse allocations at this time.
3888 */
3889 if (length != mp->m_ialloc_blks &&
3890 length != mp->m_ialloc_min_blks) {
3891 xfs_warn(log->l_mp,
3892 "%s: unsupported chunk length", __FUNCTION__);
3893 return -EINVAL;
3894 }
3895
3896 /* verify inode count is consistent with extent length */
3897 if ((count >> mp->m_sb.sb_inopblog) != length) {
3898 xfs_warn(log->l_mp,
3899 "%s: inconsistent inode count and chunk length",
3900 __FUNCTION__);
3901 return -EINVAL;
3902 }
3903
3904 /*
3905 * The icreate transaction can cover multiple cluster buffers and these
3906 * buffers could have been freed and reused. Check the individual
3907 * buffers for cancellation so we don't overwrite anything written after
3908 * a cancellation.
3909 */
3910 blks_per_cluster = xfs_icluster_size_fsb(mp);
3911 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3912 nbufs = length / blks_per_cluster;
3913 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3914 xfs_daddr_t daddr;
3915
3916 daddr = XFS_AGB_TO_DADDR(mp, agno,
3917 agbno + i * blks_per_cluster);
3918 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3919 cancel_count++;
3920 }
3921
3922 /*
3923 * We currently only use icreate for a single allocation at a time. This
3924 * means we should expect either all or none of the buffers to be
3925 * cancelled. Be conservative and skip replay if at least one buffer is
3926 * cancelled, but warn the user that something is awry if the buffers
3927 * are not consistent.
3928 *
3929 * XXX: This must be refined to only skip cancelled clusters once we use
3930 * icreate for multiple chunk allocations.
3931 */
3932 ASSERT(!cancel_count || cancel_count == nbufs);
3933 if (cancel_count) {
3934 if (cancel_count != nbufs)
3935 xfs_warn(mp,
3936 "WARNING: partial inode chunk cancellation, skipped icreate.");
3937 trace_xfs_log_recover_icreate_cancel(log, icl);
3938 return 0;
3939 }
3940
3941 trace_xfs_log_recover_icreate_recover(log, icl);
3942 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3943 length, be32_to_cpu(icl->icl_gen));
3944 }
3945
3946 STATIC void
3947 xlog_recover_buffer_ra_pass2(
3948 struct xlog *log,
3949 struct xlog_recover_item *item)
3950 {
3951 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3952 struct xfs_mount *mp = log->l_mp;
3953
3954 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3955 buf_f->blf_len, buf_f->blf_flags)) {
3956 return;
3957 }
3958
3959 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3960 buf_f->blf_len, NULL);
3961 }
3962
3963 STATIC void
3964 xlog_recover_inode_ra_pass2(
3965 struct xlog *log,
3966 struct xlog_recover_item *item)
3967 {
3968 struct xfs_inode_log_format ilf_buf;
3969 struct xfs_inode_log_format *ilfp;
3970 struct xfs_mount *mp = log->l_mp;
3971 int error;
3972
3973 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3974 ilfp = item->ri_buf[0].i_addr;
3975 } else {
3976 ilfp = &ilf_buf;
3977 memset(ilfp, 0, sizeof(*ilfp));
3978 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3979 if (error)
3980 return;
3981 }
3982
3983 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3984 return;
3985
3986 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3987 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3988 }
3989
3990 STATIC void
3991 xlog_recover_dquot_ra_pass2(
3992 struct xlog *log,
3993 struct xlog_recover_item *item)
3994 {
3995 struct xfs_mount *mp = log->l_mp;
3996 struct xfs_disk_dquot *recddq;
3997 struct xfs_dq_logformat *dq_f;
3998 uint type;
3999 int len;
4000
4001
4002 if (mp->m_qflags == 0)
4003 return;
4004
4005 recddq = item->ri_buf[1].i_addr;
4006 if (recddq == NULL)
4007 return;
4008 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
4009 return;
4010
4011 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
4012 ASSERT(type);
4013 if (log->l_quotaoffs_flag & type)
4014 return;
4015
4016 dq_f = item->ri_buf[0].i_addr;
4017 ASSERT(dq_f);
4018 ASSERT(dq_f->qlf_len == 1);
4019
4020 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
4021 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
4022 return;
4023
4024 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
4025 &xfs_dquot_buf_ra_ops);
4026 }
4027
4028 STATIC void
4029 xlog_recover_ra_pass2(
4030 struct xlog *log,
4031 struct xlog_recover_item *item)
4032 {
4033 switch (ITEM_TYPE(item)) {
4034 case XFS_LI_BUF:
4035 xlog_recover_buffer_ra_pass2(log, item);
4036 break;
4037 case XFS_LI_INODE:
4038 xlog_recover_inode_ra_pass2(log, item);
4039 break;
4040 case XFS_LI_DQUOT:
4041 xlog_recover_dquot_ra_pass2(log, item);
4042 break;
4043 case XFS_LI_EFI:
4044 case XFS_LI_EFD:
4045 case XFS_LI_QUOTAOFF:
4046 case XFS_LI_RUI:
4047 case XFS_LI_RUD:
4048 case XFS_LI_CUI:
4049 case XFS_LI_CUD:
4050 case XFS_LI_BUI:
4051 case XFS_LI_BUD:
4052 default:
4053 break;
4054 }
4055 }
4056
4057 STATIC int
4058 xlog_recover_commit_pass1(
4059 struct xlog *log,
4060 struct xlog_recover *trans,
4061 struct xlog_recover_item *item)
4062 {
4063 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
4064
4065 switch (ITEM_TYPE(item)) {
4066 case XFS_LI_BUF:
4067 return xlog_recover_buffer_pass1(log, item);
4068 case XFS_LI_QUOTAOFF:
4069 return xlog_recover_quotaoff_pass1(log, item);
4070 case XFS_LI_INODE:
4071 case XFS_LI_EFI:
4072 case XFS_LI_EFD:
4073 case XFS_LI_DQUOT:
4074 case XFS_LI_ICREATE:
4075 case XFS_LI_RUI:
4076 case XFS_LI_RUD:
4077 case XFS_LI_CUI:
4078 case XFS_LI_CUD:
4079 case XFS_LI_BUI:
4080 case XFS_LI_BUD:
4081 /* nothing to do in pass 1 */
4082 return 0;
4083 default:
4084 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4085 __func__, ITEM_TYPE(item));
4086 ASSERT(0);
4087 return -EIO;
4088 }
4089 }
4090
4091 STATIC int
4092 xlog_recover_commit_pass2(
4093 struct xlog *log,
4094 struct xlog_recover *trans,
4095 struct list_head *buffer_list,
4096 struct xlog_recover_item *item)
4097 {
4098 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4099
4100 switch (ITEM_TYPE(item)) {
4101 case XFS_LI_BUF:
4102 return xlog_recover_buffer_pass2(log, buffer_list, item,
4103 trans->r_lsn);
4104 case XFS_LI_INODE:
4105 return xlog_recover_inode_pass2(log, buffer_list, item,
4106 trans->r_lsn);
4107 case XFS_LI_EFI:
4108 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4109 case XFS_LI_EFD:
4110 return xlog_recover_efd_pass2(log, item);
4111 case XFS_LI_RUI:
4112 return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4113 case XFS_LI_RUD:
4114 return xlog_recover_rud_pass2(log, item);
4115 case XFS_LI_CUI:
4116 return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4117 case XFS_LI_CUD:
4118 return xlog_recover_cud_pass2(log, item);
4119 case XFS_LI_BUI:
4120 return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4121 case XFS_LI_BUD:
4122 return xlog_recover_bud_pass2(log, item);
4123 case XFS_LI_DQUOT:
4124 return xlog_recover_dquot_pass2(log, buffer_list, item,
4125 trans->r_lsn);
4126 case XFS_LI_ICREATE:
4127 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4128 case XFS_LI_QUOTAOFF:
4129 /* nothing to do in pass2 */
4130 return 0;
4131 default:
4132 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4133 __func__, ITEM_TYPE(item));
4134 ASSERT(0);
4135 return -EIO;
4136 }
4137 }
4138
4139 STATIC int
4140 xlog_recover_items_pass2(
4141 struct xlog *log,
4142 struct xlog_recover *trans,
4143 struct list_head *buffer_list,
4144 struct list_head *item_list)
4145 {
4146 struct xlog_recover_item *item;
4147 int error = 0;
4148
4149 list_for_each_entry(item, item_list, ri_list) {
4150 error = xlog_recover_commit_pass2(log, trans,
4151 buffer_list, item);
4152 if (error)
4153 return error;
4154 }
4155
4156 return error;
4157 }
4158
4159 /*
4160 * Perform the transaction.
4161 *
4162 * If the transaction modifies a buffer or inode, do it now. Otherwise,
4163 * EFIs and EFDs get queued up by adding entries into the AIL for them.
4164 */
4165 STATIC int
4166 xlog_recover_commit_trans(
4167 struct xlog *log,
4168 struct xlog_recover *trans,
4169 int pass,
4170 struct list_head *buffer_list)
4171 {
4172 int error = 0;
4173 int items_queued = 0;
4174 struct xlog_recover_item *item;
4175 struct xlog_recover_item *next;
4176 LIST_HEAD (ra_list);
4177 LIST_HEAD (done_list);
4178
4179 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4180
4181 hlist_del_init(&trans->r_list);
4182
4183 error = xlog_recover_reorder_trans(log, trans, pass);
4184 if (error)
4185 return error;
4186
4187 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4188 switch (pass) {
4189 case XLOG_RECOVER_PASS1:
4190 error = xlog_recover_commit_pass1(log, trans, item);
4191 break;
4192 case XLOG_RECOVER_PASS2:
4193 xlog_recover_ra_pass2(log, item);
4194 list_move_tail(&item->ri_list, &ra_list);
4195 items_queued++;
4196 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4197 error = xlog_recover_items_pass2(log, trans,
4198 buffer_list, &ra_list);
4199 list_splice_tail_init(&ra_list, &done_list);
4200 items_queued = 0;
4201 }
4202
4203 break;
4204 default:
4205 ASSERT(0);
4206 }
4207
4208 if (error)
4209 goto out;
4210 }
4211
4212 out:
4213 if (!list_empty(&ra_list)) {
4214 if (!error)
4215 error = xlog_recover_items_pass2(log, trans,
4216 buffer_list, &ra_list);
4217 list_splice_tail_init(&ra_list, &done_list);
4218 }
4219
4220 if (!list_empty(&done_list))
4221 list_splice_init(&done_list, &trans->r_itemq);
4222
4223 return error;
4224 }
4225
4226 STATIC void
4227 xlog_recover_add_item(
4228 struct list_head *head)
4229 {
4230 xlog_recover_item_t *item;
4231
4232 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4233 INIT_LIST_HEAD(&item->ri_list);
4234 list_add_tail(&item->ri_list, head);
4235 }
4236
4237 STATIC int
4238 xlog_recover_add_to_cont_trans(
4239 struct xlog *log,
4240 struct xlog_recover *trans,
4241 char *dp,
4242 int len)
4243 {
4244 xlog_recover_item_t *item;
4245 char *ptr, *old_ptr;
4246 int old_len;
4247
4248 /*
4249 * If the transaction is empty, the header was split across this and the
4250 * previous record. Copy the rest of the header.
4251 */
4252 if (list_empty(&trans->r_itemq)) {
4253 ASSERT(len <= sizeof(struct xfs_trans_header));
4254 if (len > sizeof(struct xfs_trans_header)) {
4255 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4256 return -EIO;
4257 }
4258
4259 xlog_recover_add_item(&trans->r_itemq);
4260 ptr = (char *)&trans->r_theader +
4261 sizeof(struct xfs_trans_header) - len;
4262 memcpy(ptr, dp, len);
4263 return 0;
4264 }
4265
4266 /* take the tail entry */
4267 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4268
4269 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4270 old_len = item->ri_buf[item->ri_cnt-1].i_len;
4271
4272 ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4273 memcpy(&ptr[old_len], dp, len);
4274 item->ri_buf[item->ri_cnt-1].i_len += len;
4275 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4276 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4277 return 0;
4278 }
4279
4280 /*
4281 * The next region to add is the start of a new region. It could be
4282 * a whole region or it could be the first part of a new region. Because
4283 * of this, the assumption here is that the type and size fields of all
4284 * format structures fit into the first 32 bits of the structure.
4285 *
4286 * This works because all regions must be 32 bit aligned. Therefore, we
4287 * either have both fields or we have neither field. In the case we have
4288 * neither field, the data part of the region is zero length. We only have
4289 * a log_op_header and can throw away the header since a new one will appear
4290 * later. If we have at least 4 bytes, then we can determine how many regions
4291 * will appear in the current log item.
4292 */
4293 STATIC int
4294 xlog_recover_add_to_trans(
4295 struct xlog *log,
4296 struct xlog_recover *trans,
4297 char *dp,
4298 int len)
4299 {
4300 xfs_inode_log_format_t *in_f; /* any will do */
4301 xlog_recover_item_t *item;
4302 char *ptr;
4303
4304 if (!len)
4305 return 0;
4306 if (list_empty(&trans->r_itemq)) {
4307 /* we need to catch log corruptions here */
4308 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4309 xfs_warn(log->l_mp, "%s: bad header magic number",
4310 __func__);
4311 ASSERT(0);
4312 return -EIO;
4313 }
4314
4315 if (len > sizeof(struct xfs_trans_header)) {
4316 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4317 ASSERT(0);
4318 return -EIO;
4319 }
4320
4321 /*
4322 * The transaction header can be arbitrarily split across op
4323 * records. If we don't have the whole thing here, copy what we
4324 * do have and handle the rest in the next record.
4325 */
4326 if (len == sizeof(struct xfs_trans_header))
4327 xlog_recover_add_item(&trans->r_itemq);
4328 memcpy(&trans->r_theader, dp, len);
4329 return 0;
4330 }
4331
4332 ptr = kmem_alloc(len, KM_SLEEP);
4333 memcpy(ptr, dp, len);
4334 in_f = (xfs_inode_log_format_t *)ptr;
4335
4336 /* take the tail entry */
4337 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4338 if (item->ri_total != 0 &&
4339 item->ri_total == item->ri_cnt) {
4340 /* tail item is in use, get a new one */
4341 xlog_recover_add_item(&trans->r_itemq);
4342 item = list_entry(trans->r_itemq.prev,
4343 xlog_recover_item_t, ri_list);
4344 }
4345
4346 if (item->ri_total == 0) { /* first region to be added */
4347 if (in_f->ilf_size == 0 ||
4348 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4349 xfs_warn(log->l_mp,
4350 "bad number of regions (%d) in inode log format",
4351 in_f->ilf_size);
4352 ASSERT(0);
4353 kmem_free(ptr);
4354 return -EIO;
4355 }
4356
4357 item->ri_total = in_f->ilf_size;
4358 item->ri_buf =
4359 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4360 KM_SLEEP);
4361 }
4362 ASSERT(item->ri_total > item->ri_cnt);
4363 /* Description region is ri_buf[0] */
4364 item->ri_buf[item->ri_cnt].i_addr = ptr;
4365 item->ri_buf[item->ri_cnt].i_len = len;
4366 item->ri_cnt++;
4367 trace_xfs_log_recover_item_add(log, trans, item, 0);
4368 return 0;
4369 }
4370
4371 /*
4372 * Free up any resources allocated by the transaction
4373 *
4374 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4375 */
4376 STATIC void
4377 xlog_recover_free_trans(
4378 struct xlog_recover *trans)
4379 {
4380 xlog_recover_item_t *item, *n;
4381 int i;
4382
4383 hlist_del_init(&trans->r_list);
4384
4385 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4386 /* Free the regions in the item. */
4387 list_del(&item->ri_list);
4388 for (i = 0; i < item->ri_cnt; i++)
4389 kmem_free(item->ri_buf[i].i_addr);
4390 /* Free the item itself */
4391 kmem_free(item->ri_buf);
4392 kmem_free(item);
4393 }
4394 /* Free the transaction recover structure */
4395 kmem_free(trans);
4396 }
4397
4398 /*
4399 * On error or completion, trans is freed.
4400 */
4401 STATIC int
4402 xlog_recovery_process_trans(
4403 struct xlog *log,
4404 struct xlog_recover *trans,
4405 char *dp,
4406 unsigned int len,
4407 unsigned int flags,
4408 int pass,
4409 struct list_head *buffer_list)
4410 {
4411 int error = 0;
4412 bool freeit = false;
4413
4414 /* mask off ophdr transaction container flags */
4415 flags &= ~XLOG_END_TRANS;
4416 if (flags & XLOG_WAS_CONT_TRANS)
4417 flags &= ~XLOG_CONTINUE_TRANS;
4418
4419 /*
4420 * Callees must not free the trans structure. We'll decide if we need to
4421 * free it or not based on the operation being done and it's result.
4422 */
4423 switch (flags) {
4424 /* expected flag values */
4425 case 0:
4426 case XLOG_CONTINUE_TRANS:
4427 error = xlog_recover_add_to_trans(log, trans, dp, len);
4428 break;
4429 case XLOG_WAS_CONT_TRANS:
4430 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4431 break;
4432 case XLOG_COMMIT_TRANS:
4433 error = xlog_recover_commit_trans(log, trans, pass,
4434 buffer_list);
4435 /* success or fail, we are now done with this transaction. */
4436 freeit = true;
4437 break;
4438
4439 /* unexpected flag values */
4440 case XLOG_UNMOUNT_TRANS:
4441 /* just skip trans */
4442 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4443 freeit = true;
4444 break;
4445 case XLOG_START_TRANS:
4446 default:
4447 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4448 ASSERT(0);
4449 error = -EIO;
4450 break;
4451 }
4452 if (error || freeit)
4453 xlog_recover_free_trans(trans);
4454 return error;
4455 }
4456
4457 /*
4458 * Lookup the transaction recovery structure associated with the ID in the
4459 * current ophdr. If the transaction doesn't exist and the start flag is set in
4460 * the ophdr, then allocate a new transaction for future ID matches to find.
4461 * Either way, return what we found during the lookup - an existing transaction
4462 * or nothing.
4463 */
4464 STATIC struct xlog_recover *
4465 xlog_recover_ophdr_to_trans(
4466 struct hlist_head rhash[],
4467 struct xlog_rec_header *rhead,
4468 struct xlog_op_header *ohead)
4469 {
4470 struct xlog_recover *trans;
4471 xlog_tid_t tid;
4472 struct hlist_head *rhp;
4473
4474 tid = be32_to_cpu(ohead->oh_tid);
4475 rhp = &rhash[XLOG_RHASH(tid)];
4476 hlist_for_each_entry(trans, rhp, r_list) {
4477 if (trans->r_log_tid == tid)
4478 return trans;
4479 }
4480
4481 /*
4482 * skip over non-start transaction headers - we could be
4483 * processing slack space before the next transaction starts
4484 */
4485 if (!(ohead->oh_flags & XLOG_START_TRANS))
4486 return NULL;
4487
4488 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4489
4490 /*
4491 * This is a new transaction so allocate a new recovery container to
4492 * hold the recovery ops that will follow.
4493 */
4494 trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4495 trans->r_log_tid = tid;
4496 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4497 INIT_LIST_HEAD(&trans->r_itemq);
4498 INIT_HLIST_NODE(&trans->r_list);
4499 hlist_add_head(&trans->r_list, rhp);
4500
4501 /*
4502 * Nothing more to do for this ophdr. Items to be added to this new
4503 * transaction will be in subsequent ophdr containers.
4504 */
4505 return NULL;
4506 }
4507
4508 STATIC int
4509 xlog_recover_process_ophdr(
4510 struct xlog *log,
4511 struct hlist_head rhash[],
4512 struct xlog_rec_header *rhead,
4513 struct xlog_op_header *ohead,
4514 char *dp,
4515 char *end,
4516 int pass,
4517 struct list_head *buffer_list)
4518 {
4519 struct xlog_recover *trans;
4520 unsigned int len;
4521 int error;
4522
4523 /* Do we understand who wrote this op? */
4524 if (ohead->oh_clientid != XFS_TRANSACTION &&
4525 ohead->oh_clientid != XFS_LOG) {
4526 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4527 __func__, ohead->oh_clientid);
4528 ASSERT(0);
4529 return -EIO;
4530 }
4531
4532 /*
4533 * Check the ophdr contains all the data it is supposed to contain.
4534 */
4535 len = be32_to_cpu(ohead->oh_len);
4536 if (dp + len > end) {
4537 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4538 WARN_ON(1);
4539 return -EIO;
4540 }
4541
4542 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4543 if (!trans) {
4544 /* nothing to do, so skip over this ophdr */
4545 return 0;
4546 }
4547
4548 /*
4549 * The recovered buffer queue is drained only once we know that all
4550 * recovery items for the current LSN have been processed. This is
4551 * required because:
4552 *
4553 * - Buffer write submission updates the metadata LSN of the buffer.
4554 * - Log recovery skips items with a metadata LSN >= the current LSN of
4555 * the recovery item.
4556 * - Separate recovery items against the same metadata buffer can share
4557 * a current LSN. I.e., consider that the LSN of a recovery item is
4558 * defined as the starting LSN of the first record in which its
4559 * transaction appears, that a record can hold multiple transactions,
4560 * and/or that a transaction can span multiple records.
4561 *
4562 * In other words, we are allowed to submit a buffer from log recovery
4563 * once per current LSN. Otherwise, we may incorrectly skip recovery
4564 * items and cause corruption.
4565 *
4566 * We don't know up front whether buffers are updated multiple times per
4567 * LSN. Therefore, track the current LSN of each commit log record as it
4568 * is processed and drain the queue when it changes. Use commit records
4569 * because they are ordered correctly by the logging code.
4570 */
4571 if (log->l_recovery_lsn != trans->r_lsn &&
4572 ohead->oh_flags & XLOG_COMMIT_TRANS) {
4573 error = xfs_buf_delwri_submit(buffer_list);
4574 if (error)
4575 return error;
4576 log->l_recovery_lsn = trans->r_lsn;
4577 }
4578
4579 return xlog_recovery_process_trans(log, trans, dp, len,
4580 ohead->oh_flags, pass, buffer_list);
4581 }
4582
4583 /*
4584 * There are two valid states of the r_state field. 0 indicates that the
4585 * transaction structure is in a normal state. We have either seen the
4586 * start of the transaction or the last operation we added was not a partial
4587 * operation. If the last operation we added to the transaction was a
4588 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4589 *
4590 * NOTE: skip LRs with 0 data length.
4591 */
4592 STATIC int
4593 xlog_recover_process_data(
4594 struct xlog *log,
4595 struct hlist_head rhash[],
4596 struct xlog_rec_header *rhead,
4597 char *dp,
4598 int pass,
4599 struct list_head *buffer_list)
4600 {
4601 struct xlog_op_header *ohead;
4602 char *end;
4603 int num_logops;
4604 int error;
4605
4606 end = dp + be32_to_cpu(rhead->h_len);
4607 num_logops = be32_to_cpu(rhead->h_num_logops);
4608
4609 /* check the log format matches our own - else we can't recover */
4610 if (xlog_header_check_recover(log->l_mp, rhead))
4611 return -EIO;
4612
4613 trace_xfs_log_recover_record(log, rhead, pass);
4614 while ((dp < end) && num_logops) {
4615
4616 ohead = (struct xlog_op_header *)dp;
4617 dp += sizeof(*ohead);
4618 ASSERT(dp <= end);
4619
4620 /* errors will abort recovery */
4621 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4622 dp, end, pass, buffer_list);
4623 if (error)
4624 return error;
4625
4626 dp += be32_to_cpu(ohead->oh_len);
4627 num_logops--;
4628 }
4629 return 0;
4630 }
4631
4632 /* Recover the EFI if necessary. */
4633 STATIC int
4634 xlog_recover_process_efi(
4635 struct xfs_mount *mp,
4636 struct xfs_ail *ailp,
4637 struct xfs_log_item *lip)
4638 {
4639 struct xfs_efi_log_item *efip;
4640 int error;
4641
4642 /*
4643 * Skip EFIs that we've already processed.
4644 */
4645 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4646 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4647 return 0;
4648
4649 spin_unlock(&ailp->xa_lock);
4650 error = xfs_efi_recover(mp, efip);
4651 spin_lock(&ailp->xa_lock);
4652
4653 return error;
4654 }
4655
4656 /* Release the EFI since we're cancelling everything. */
4657 STATIC void
4658 xlog_recover_cancel_efi(
4659 struct xfs_mount *mp,
4660 struct xfs_ail *ailp,
4661 struct xfs_log_item *lip)
4662 {
4663 struct xfs_efi_log_item *efip;
4664
4665 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4666
4667 spin_unlock(&ailp->xa_lock);
4668 xfs_efi_release(efip);
4669 spin_lock(&ailp->xa_lock);
4670 }
4671
4672 /* Recover the RUI if necessary. */
4673 STATIC int
4674 xlog_recover_process_rui(
4675 struct xfs_mount *mp,
4676 struct xfs_ail *ailp,
4677 struct xfs_log_item *lip)
4678 {
4679 struct xfs_rui_log_item *ruip;
4680 int error;
4681
4682 /*
4683 * Skip RUIs that we've already processed.
4684 */
4685 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4686 if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4687 return 0;
4688
4689 spin_unlock(&ailp->xa_lock);
4690 error = xfs_rui_recover(mp, ruip);
4691 spin_lock(&ailp->xa_lock);
4692
4693 return error;
4694 }
4695
4696 /* Release the RUI since we're cancelling everything. */
4697 STATIC void
4698 xlog_recover_cancel_rui(
4699 struct xfs_mount *mp,
4700 struct xfs_ail *ailp,
4701 struct xfs_log_item *lip)
4702 {
4703 struct xfs_rui_log_item *ruip;
4704
4705 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4706
4707 spin_unlock(&ailp->xa_lock);
4708 xfs_rui_release(ruip);
4709 spin_lock(&ailp->xa_lock);
4710 }
4711
4712 /* Recover the CUI if necessary. */
4713 STATIC int
4714 xlog_recover_process_cui(
4715 struct xfs_mount *mp,
4716 struct xfs_ail *ailp,
4717 struct xfs_log_item *lip)
4718 {
4719 struct xfs_cui_log_item *cuip;
4720 int error;
4721
4722 /*
4723 * Skip CUIs that we've already processed.
4724 */
4725 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4726 if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4727 return 0;
4728
4729 spin_unlock(&ailp->xa_lock);
4730 error = xfs_cui_recover(mp, cuip);
4731 spin_lock(&ailp->xa_lock);
4732
4733 return error;
4734 }
4735
4736 /* Release the CUI since we're cancelling everything. */
4737 STATIC void
4738 xlog_recover_cancel_cui(
4739 struct xfs_mount *mp,
4740 struct xfs_ail *ailp,
4741 struct xfs_log_item *lip)
4742 {
4743 struct xfs_cui_log_item *cuip;
4744
4745 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4746
4747 spin_unlock(&ailp->xa_lock);
4748 xfs_cui_release(cuip);
4749 spin_lock(&ailp->xa_lock);
4750 }
4751
4752 /* Recover the BUI if necessary. */
4753 STATIC int
4754 xlog_recover_process_bui(
4755 struct xfs_mount *mp,
4756 struct xfs_ail *ailp,
4757 struct xfs_log_item *lip)
4758 {
4759 struct xfs_bui_log_item *buip;
4760 int error;
4761
4762 /*
4763 * Skip BUIs that we've already processed.
4764 */
4765 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4766 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4767 return 0;
4768
4769 spin_unlock(&ailp->xa_lock);
4770 error = xfs_bui_recover(mp, buip);
4771 spin_lock(&ailp->xa_lock);
4772
4773 return error;
4774 }
4775
4776 /* Release the BUI since we're cancelling everything. */
4777 STATIC void
4778 xlog_recover_cancel_bui(
4779 struct xfs_mount *mp,
4780 struct xfs_ail *ailp,
4781 struct xfs_log_item *lip)
4782 {
4783 struct xfs_bui_log_item *buip;
4784
4785 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4786
4787 spin_unlock(&ailp->xa_lock);
4788 xfs_bui_release(buip);
4789 spin_lock(&ailp->xa_lock);
4790 }
4791
4792 /* Is this log item a deferred action intent? */
4793 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4794 {
4795 switch (lip->li_type) {
4796 case XFS_LI_EFI:
4797 case XFS_LI_RUI:
4798 case XFS_LI_CUI:
4799 case XFS_LI_BUI:
4800 return true;
4801 default:
4802 return false;
4803 }
4804 }
4805
4806 /*
4807 * When this is called, all of the log intent items which did not have
4808 * corresponding log done items should be in the AIL. What we do now
4809 * is update the data structures associated with each one.
4810 *
4811 * Since we process the log intent items in normal transactions, they
4812 * will be removed at some point after the commit. This prevents us
4813 * from just walking down the list processing each one. We'll use a
4814 * flag in the intent item to skip those that we've already processed
4815 * and use the AIL iteration mechanism's generation count to try to
4816 * speed this up at least a bit.
4817 *
4818 * When we start, we know that the intents are the only things in the
4819 * AIL. As we process them, however, other items are added to the
4820 * AIL.
4821 */
4822 STATIC int
4823 xlog_recover_process_intents(
4824 struct xlog *log)
4825 {
4826 struct xfs_log_item *lip;
4827 int error = 0;
4828 struct xfs_ail_cursor cur;
4829 struct xfs_ail *ailp;
4830 xfs_lsn_t last_lsn;
4831
4832 ailp = log->l_ailp;
4833 spin_lock(&ailp->xa_lock);
4834 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4835 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4836 while (lip != NULL) {
4837 /*
4838 * We're done when we see something other than an intent.
4839 * There should be no intents left in the AIL now.
4840 */
4841 if (!xlog_item_is_intent(lip)) {
4842 #ifdef DEBUG
4843 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4844 ASSERT(!xlog_item_is_intent(lip));
4845 #endif
4846 break;
4847 }
4848
4849 /*
4850 * We should never see a redo item with a LSN higher than
4851 * the last transaction we found in the log at the start
4852 * of recovery.
4853 */
4854 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4855
4856 switch (lip->li_type) {
4857 case XFS_LI_EFI:
4858 error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4859 break;
4860 case XFS_LI_RUI:
4861 error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4862 break;
4863 case XFS_LI_CUI:
4864 error = xlog_recover_process_cui(log->l_mp, ailp, lip);
4865 break;
4866 case XFS_LI_BUI:
4867 error = xlog_recover_process_bui(log->l_mp, ailp, lip);
4868 break;
4869 }
4870 if (error)
4871 goto out;
4872 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4873 }
4874 out:
4875 xfs_trans_ail_cursor_done(&cur);
4876 spin_unlock(&ailp->xa_lock);
4877 return error;
4878 }
4879
4880 /*
4881 * A cancel occurs when the mount has failed and we're bailing out.
4882 * Release all pending log intent items so they don't pin the AIL.
4883 */
4884 STATIC int
4885 xlog_recover_cancel_intents(
4886 struct xlog *log)
4887 {
4888 struct xfs_log_item *lip;
4889 int error = 0;
4890 struct xfs_ail_cursor cur;
4891 struct xfs_ail *ailp;
4892
4893 ailp = log->l_ailp;
4894 spin_lock(&ailp->xa_lock);
4895 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4896 while (lip != NULL) {
4897 /*
4898 * We're done when we see something other than an intent.
4899 * There should be no intents left in the AIL now.
4900 */
4901 if (!xlog_item_is_intent(lip)) {
4902 #ifdef DEBUG
4903 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4904 ASSERT(!xlog_item_is_intent(lip));
4905 #endif
4906 break;
4907 }
4908
4909 switch (lip->li_type) {
4910 case XFS_LI_EFI:
4911 xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4912 break;
4913 case XFS_LI_RUI:
4914 xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4915 break;
4916 case XFS_LI_CUI:
4917 xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4918 break;
4919 case XFS_LI_BUI:
4920 xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4921 break;
4922 }
4923
4924 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4925 }
4926
4927 xfs_trans_ail_cursor_done(&cur);
4928 spin_unlock(&ailp->xa_lock);
4929 return error;
4930 }
4931
4932 /*
4933 * This routine performs a transaction to null out a bad inode pointer
4934 * in an agi unlinked inode hash bucket.
4935 */
4936 STATIC void
4937 xlog_recover_clear_agi_bucket(
4938 xfs_mount_t *mp,
4939 xfs_agnumber_t agno,
4940 int bucket)
4941 {
4942 xfs_trans_t *tp;
4943 xfs_agi_t *agi;
4944 xfs_buf_t *agibp;
4945 int offset;
4946 int error;
4947
4948 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
4949 if (error)
4950 goto out_error;
4951
4952 error = xfs_read_agi(mp, tp, agno, &agibp);
4953 if (error)
4954 goto out_abort;
4955
4956 agi = XFS_BUF_TO_AGI(agibp);
4957 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4958 offset = offsetof(xfs_agi_t, agi_unlinked) +
4959 (sizeof(xfs_agino_t) * bucket);
4960 xfs_trans_log_buf(tp, agibp, offset,
4961 (offset + sizeof(xfs_agino_t) - 1));
4962
4963 error = xfs_trans_commit(tp);
4964 if (error)
4965 goto out_error;
4966 return;
4967
4968 out_abort:
4969 xfs_trans_cancel(tp);
4970 out_error:
4971 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4972 return;
4973 }
4974
4975 STATIC xfs_agino_t
4976 xlog_recover_process_one_iunlink(
4977 struct xfs_mount *mp,
4978 xfs_agnumber_t agno,
4979 xfs_agino_t agino,
4980 int bucket)
4981 {
4982 struct xfs_buf *ibp;
4983 struct xfs_dinode *dip;
4984 struct xfs_inode *ip;
4985 xfs_ino_t ino;
4986 int error;
4987
4988 ino = XFS_AGINO_TO_INO(mp, agno, agino);
4989 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4990 if (error)
4991 goto fail;
4992
4993 /*
4994 * Get the on disk inode to find the next inode in the bucket.
4995 */
4996 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4997 if (error)
4998 goto fail_iput;
4999
5000 xfs_iflags_clear(ip, XFS_IRECOVERY);
5001 ASSERT(VFS_I(ip)->i_nlink == 0);
5002 ASSERT(VFS_I(ip)->i_mode != 0);
5003
5004 /* setup for the next pass */
5005 agino = be32_to_cpu(dip->di_next_unlinked);
5006 xfs_buf_relse(ibp);
5007
5008 /*
5009 * Prevent any DMAPI event from being sent when the reference on
5010 * the inode is dropped.
5011 */
5012 ip->i_d.di_dmevmask = 0;
5013
5014 IRELE(ip);
5015 return agino;
5016
5017 fail_iput:
5018 IRELE(ip);
5019 fail:
5020 /*
5021 * We can't read in the inode this bucket points to, or this inode
5022 * is messed up. Just ditch this bucket of inodes. We will lose
5023 * some inodes and space, but at least we won't hang.
5024 *
5025 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5026 * clear the inode pointer in the bucket.
5027 */
5028 xlog_recover_clear_agi_bucket(mp, agno, bucket);
5029 return NULLAGINO;
5030 }
5031
5032 /*
5033 * xlog_iunlink_recover
5034 *
5035 * This is called during recovery to process any inodes which
5036 * we unlinked but not freed when the system crashed. These
5037 * inodes will be on the lists in the AGI blocks. What we do
5038 * here is scan all the AGIs and fully truncate and free any
5039 * inodes found on the lists. Each inode is removed from the
5040 * lists when it has been fully truncated and is freed. The
5041 * freeing of the inode and its removal from the list must be
5042 * atomic.
5043 */
5044 STATIC void
5045 xlog_recover_process_iunlinks(
5046 struct xlog *log)
5047 {
5048 xfs_mount_t *mp;
5049 xfs_agnumber_t agno;
5050 xfs_agi_t *agi;
5051 xfs_buf_t *agibp;
5052 xfs_agino_t agino;
5053 int bucket;
5054 int error;
5055 uint mp_dmevmask;
5056
5057 mp = log->l_mp;
5058
5059 /*
5060 * Prevent any DMAPI event from being sent while in this function.
5061 */
5062 mp_dmevmask = mp->m_dmevmask;
5063 mp->m_dmevmask = 0;
5064
5065 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5066 /*
5067 * Find the agi for this ag.
5068 */
5069 error = xfs_read_agi(mp, NULL, agno, &agibp);
5070 if (error) {
5071 /*
5072 * AGI is b0rked. Don't process it.
5073 *
5074 * We should probably mark the filesystem as corrupt
5075 * after we've recovered all the ag's we can....
5076 */
5077 continue;
5078 }
5079 /*
5080 * Unlock the buffer so that it can be acquired in the normal
5081 * course of the transaction to truncate and free each inode.
5082 * Because we are not racing with anyone else here for the AGI
5083 * buffer, we don't even need to hold it locked to read the
5084 * initial unlinked bucket entries out of the buffer. We keep
5085 * buffer reference though, so that it stays pinned in memory
5086 * while we need the buffer.
5087 */
5088 agi = XFS_BUF_TO_AGI(agibp);
5089 xfs_buf_unlock(agibp);
5090
5091 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5092 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5093 while (agino != NULLAGINO) {
5094 agino = xlog_recover_process_one_iunlink(mp,
5095 agno, agino, bucket);
5096 }
5097 }
5098 xfs_buf_rele(agibp);
5099 }
5100
5101 mp->m_dmevmask = mp_dmevmask;
5102 }
5103
5104 STATIC int
5105 xlog_unpack_data(
5106 struct xlog_rec_header *rhead,
5107 char *dp,
5108 struct xlog *log)
5109 {
5110 int i, j, k;
5111
5112 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5113 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5114 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5115 dp += BBSIZE;
5116 }
5117
5118 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5119 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5120 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5121 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5122 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5123 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5124 dp += BBSIZE;
5125 }
5126 }
5127
5128 return 0;
5129 }
5130
5131 /*
5132 * CRC check, unpack and process a log record.
5133 */
5134 STATIC int
5135 xlog_recover_process(
5136 struct xlog *log,
5137 struct hlist_head rhash[],
5138 struct xlog_rec_header *rhead,
5139 char *dp,
5140 int pass,
5141 struct list_head *buffer_list)
5142 {
5143 int error;
5144 __le32 old_crc = rhead->h_crc;
5145 __le32 crc;
5146
5147
5148 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5149
5150 /*
5151 * Nothing else to do if this is a CRC verification pass. Just return
5152 * if this a record with a non-zero crc. Unfortunately, mkfs always
5153 * sets old_crc to 0 so we must consider this valid even on v5 supers.
5154 * Otherwise, return EFSBADCRC on failure so the callers up the stack
5155 * know precisely what failed.
5156 */
5157 if (pass == XLOG_RECOVER_CRCPASS) {
5158 if (old_crc && crc != old_crc)
5159 return -EFSBADCRC;
5160 return 0;
5161 }
5162
5163 /*
5164 * We're in the normal recovery path. Issue a warning if and only if the
5165 * CRC in the header is non-zero. This is an advisory warning and the
5166 * zero CRC check prevents warnings from being emitted when upgrading
5167 * the kernel from one that does not add CRCs by default.
5168 */
5169 if (crc != old_crc) {
5170 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5171 xfs_alert(log->l_mp,
5172 "log record CRC mismatch: found 0x%x, expected 0x%x.",
5173 le32_to_cpu(old_crc),
5174 le32_to_cpu(crc));
5175 xfs_hex_dump(dp, 32);
5176 }
5177
5178 /*
5179 * If the filesystem is CRC enabled, this mismatch becomes a
5180 * fatal log corruption failure.
5181 */
5182 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5183 return -EFSCORRUPTED;
5184 }
5185
5186 error = xlog_unpack_data(rhead, dp, log);
5187 if (error)
5188 return error;
5189
5190 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5191 buffer_list);
5192 }
5193
5194 STATIC int
5195 xlog_valid_rec_header(
5196 struct xlog *log,
5197 struct xlog_rec_header *rhead,
5198 xfs_daddr_t blkno)
5199 {
5200 int hlen;
5201
5202 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5203 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5204 XFS_ERRLEVEL_LOW, log->l_mp);
5205 return -EFSCORRUPTED;
5206 }
5207 if (unlikely(
5208 (!rhead->h_version ||
5209 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5210 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5211 __func__, be32_to_cpu(rhead->h_version));
5212 return -EIO;
5213 }
5214
5215 /* LR body must have data or it wouldn't have been written */
5216 hlen = be32_to_cpu(rhead->h_len);
5217 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
5218 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5219 XFS_ERRLEVEL_LOW, log->l_mp);
5220 return -EFSCORRUPTED;
5221 }
5222 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
5223 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5224 XFS_ERRLEVEL_LOW, log->l_mp);
5225 return -EFSCORRUPTED;
5226 }
5227 return 0;
5228 }
5229
5230 /*
5231 * Read the log from tail to head and process the log records found.
5232 * Handle the two cases where the tail and head are in the same cycle
5233 * and where the active portion of the log wraps around the end of
5234 * the physical log separately. The pass parameter is passed through
5235 * to the routines called to process the data and is not looked at
5236 * here.
5237 */
5238 STATIC int
5239 xlog_do_recovery_pass(
5240 struct xlog *log,
5241 xfs_daddr_t head_blk,
5242 xfs_daddr_t tail_blk,
5243 int pass,
5244 xfs_daddr_t *first_bad) /* out: first bad log rec */
5245 {
5246 xlog_rec_header_t *rhead;
5247 xfs_daddr_t blk_no, rblk_no;
5248 xfs_daddr_t rhead_blk;
5249 char *offset;
5250 xfs_buf_t *hbp, *dbp;
5251 int error = 0, h_size, h_len;
5252 int error2 = 0;
5253 int bblks, split_bblks;
5254 int hblks, split_hblks, wrapped_hblks;
5255 int i;
5256 struct hlist_head rhash[XLOG_RHASH_SIZE];
5257 LIST_HEAD (buffer_list);
5258
5259 ASSERT(head_blk != tail_blk);
5260 blk_no = rhead_blk = tail_blk;
5261
5262 for (i = 0; i < XLOG_RHASH_SIZE; i++)
5263 INIT_HLIST_HEAD(&rhash[i]);
5264
5265 /*
5266 * Read the header of the tail block and get the iclog buffer size from
5267 * h_size. Use this to tell how many sectors make up the log header.
5268 */
5269 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5270 /*
5271 * When using variable length iclogs, read first sector of
5272 * iclog header and extract the header size from it. Get a
5273 * new hbp that is the correct size.
5274 */
5275 hbp = xlog_get_bp(log, 1);
5276 if (!hbp)
5277 return -ENOMEM;
5278
5279 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5280 if (error)
5281 goto bread_err1;
5282
5283 rhead = (xlog_rec_header_t *)offset;
5284 error = xlog_valid_rec_header(log, rhead, tail_blk);
5285 if (error)
5286 goto bread_err1;
5287
5288 /*
5289 * xfsprogs has a bug where record length is based on lsunit but
5290 * h_size (iclog size) is hardcoded to 32k. Now that we
5291 * unconditionally CRC verify the unmount record, this means the
5292 * log buffer can be too small for the record and cause an
5293 * overrun.
5294 *
5295 * Detect this condition here. Use lsunit for the buffer size as
5296 * long as this looks like the mkfs case. Otherwise, return an
5297 * error to avoid a buffer overrun.
5298 */
5299 h_size = be32_to_cpu(rhead->h_size);
5300 h_len = be32_to_cpu(rhead->h_len);
5301 if (h_len > h_size) {
5302 if (h_len <= log->l_mp->m_logbsize &&
5303 be32_to_cpu(rhead->h_num_logops) == 1) {
5304 xfs_warn(log->l_mp,
5305 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5306 h_size, log->l_mp->m_logbsize);
5307 h_size = log->l_mp->m_logbsize;
5308 } else
5309 return -EFSCORRUPTED;
5310 }
5311
5312 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5313 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5314 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5315 if (h_size % XLOG_HEADER_CYCLE_SIZE)
5316 hblks++;
5317 xlog_put_bp(hbp);
5318 hbp = xlog_get_bp(log, hblks);
5319 } else {
5320 hblks = 1;
5321 }
5322 } else {
5323 ASSERT(log->l_sectBBsize == 1);
5324 hblks = 1;
5325 hbp = xlog_get_bp(log, 1);
5326 h_size = XLOG_BIG_RECORD_BSIZE;
5327 }
5328
5329 if (!hbp)
5330 return -ENOMEM;
5331 dbp = xlog_get_bp(log, BTOBB(h_size));
5332 if (!dbp) {
5333 xlog_put_bp(hbp);
5334 return -ENOMEM;
5335 }
5336
5337 memset(rhash, 0, sizeof(rhash));
5338 if (tail_blk > head_blk) {
5339 /*
5340 * Perform recovery around the end of the physical log.
5341 * When the head is not on the same cycle number as the tail,
5342 * we can't do a sequential recovery.
5343 */
5344 while (blk_no < log->l_logBBsize) {
5345 /*
5346 * Check for header wrapping around physical end-of-log
5347 */
5348 offset = hbp->b_addr;
5349 split_hblks = 0;
5350 wrapped_hblks = 0;
5351 if (blk_no + hblks <= log->l_logBBsize) {
5352 /* Read header in one read */
5353 error = xlog_bread(log, blk_no, hblks, hbp,
5354 &offset);
5355 if (error)
5356 goto bread_err2;
5357 } else {
5358 /* This LR is split across physical log end */
5359 if (blk_no != log->l_logBBsize) {
5360 /* some data before physical log end */
5361 ASSERT(blk_no <= INT_MAX);
5362 split_hblks = log->l_logBBsize - (int)blk_no;
5363 ASSERT(split_hblks > 0);
5364 error = xlog_bread(log, blk_no,
5365 split_hblks, hbp,
5366 &offset);
5367 if (error)
5368 goto bread_err2;
5369 }
5370
5371 /*
5372 * Note: this black magic still works with
5373 * large sector sizes (non-512) only because:
5374 * - we increased the buffer size originally
5375 * by 1 sector giving us enough extra space
5376 * for the second read;
5377 * - the log start is guaranteed to be sector
5378 * aligned;
5379 * - we read the log end (LR header start)
5380 * _first_, then the log start (LR header end)
5381 * - order is important.
5382 */
5383 wrapped_hblks = hblks - split_hblks;
5384 error = xlog_bread_offset(log, 0,
5385 wrapped_hblks, hbp,
5386 offset + BBTOB(split_hblks));
5387 if (error)
5388 goto bread_err2;
5389 }
5390 rhead = (xlog_rec_header_t *)offset;
5391 error = xlog_valid_rec_header(log, rhead,
5392 split_hblks ? blk_no : 0);
5393 if (error)
5394 goto bread_err2;
5395
5396 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5397 blk_no += hblks;
5398
5399 /*
5400 * Read the log record data in multiple reads if it
5401 * wraps around the end of the log. Note that if the
5402 * header already wrapped, blk_no could point past the
5403 * end of the log. The record data is contiguous in
5404 * that case.
5405 */
5406 if (blk_no + bblks <= log->l_logBBsize ||
5407 blk_no >= log->l_logBBsize) {
5408 /* mod blk_no in case the header wrapped and
5409 * pushed it beyond the end of the log */
5410 rblk_no = do_mod(blk_no, log->l_logBBsize);
5411 error = xlog_bread(log, rblk_no, bblks, dbp,
5412 &offset);
5413 if (error)
5414 goto bread_err2;
5415 } else {
5416 /* This log record is split across the
5417 * physical end of log */
5418 offset = dbp->b_addr;
5419 split_bblks = 0;
5420 if (blk_no != log->l_logBBsize) {
5421 /* some data is before the physical
5422 * end of log */
5423 ASSERT(!wrapped_hblks);
5424 ASSERT(blk_no <= INT_MAX);
5425 split_bblks =
5426 log->l_logBBsize - (int)blk_no;
5427 ASSERT(split_bblks > 0);
5428 error = xlog_bread(log, blk_no,
5429 split_bblks, dbp,
5430 &offset);
5431 if (error)
5432 goto bread_err2;
5433 }
5434
5435 /*
5436 * Note: this black magic still works with
5437 * large sector sizes (non-512) only because:
5438 * - we increased the buffer size originally
5439 * by 1 sector giving us enough extra space
5440 * for the second read;
5441 * - the log start is guaranteed to be sector
5442 * aligned;
5443 * - we read the log end (LR header start)
5444 * _first_, then the log start (LR header end)
5445 * - order is important.
5446 */
5447 error = xlog_bread_offset(log, 0,
5448 bblks - split_bblks, dbp,
5449 offset + BBTOB(split_bblks));
5450 if (error)
5451 goto bread_err2;
5452 }
5453
5454 error = xlog_recover_process(log, rhash, rhead, offset,
5455 pass, &buffer_list);
5456 if (error)
5457 goto bread_err2;
5458
5459 blk_no += bblks;
5460 rhead_blk = blk_no;
5461 }
5462
5463 ASSERT(blk_no >= log->l_logBBsize);
5464 blk_no -= log->l_logBBsize;
5465 rhead_blk = blk_no;
5466 }
5467
5468 /* read first part of physical log */
5469 while (blk_no < head_blk) {
5470 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5471 if (error)
5472 goto bread_err2;
5473
5474 rhead = (xlog_rec_header_t *)offset;
5475 error = xlog_valid_rec_header(log, rhead, blk_no);
5476 if (error)
5477 goto bread_err2;
5478
5479 /* blocks in data section */
5480 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5481 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5482 &offset);
5483 if (error)
5484 goto bread_err2;
5485
5486 error = xlog_recover_process(log, rhash, rhead, offset, pass,
5487 &buffer_list);
5488 if (error)
5489 goto bread_err2;
5490
5491 blk_no += bblks + hblks;
5492 rhead_blk = blk_no;
5493 }
5494
5495 bread_err2:
5496 xlog_put_bp(dbp);
5497 bread_err1:
5498 xlog_put_bp(hbp);
5499
5500 /*
5501 * Submit buffers that have been added from the last record processed,
5502 * regardless of error status.
5503 */
5504 if (!list_empty(&buffer_list))
5505 error2 = xfs_buf_delwri_submit(&buffer_list);
5506
5507 if (error && first_bad)
5508 *first_bad = rhead_blk;
5509
5510 /*
5511 * Transactions are freed at commit time but transactions without commit
5512 * records on disk are never committed. Free any that may be left in the
5513 * hash table.
5514 */
5515 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5516 struct hlist_node *tmp;
5517 struct xlog_recover *trans;
5518
5519 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5520 xlog_recover_free_trans(trans);
5521 }
5522
5523 return error ? error : error2;
5524 }
5525
5526 /*
5527 * Do the recovery of the log. We actually do this in two phases.
5528 * The two passes are necessary in order to implement the function
5529 * of cancelling a record written into the log. The first pass
5530 * determines those things which have been cancelled, and the
5531 * second pass replays log items normally except for those which
5532 * have been cancelled. The handling of the replay and cancellations
5533 * takes place in the log item type specific routines.
5534 *
5535 * The table of items which have cancel records in the log is allocated
5536 * and freed at this level, since only here do we know when all of
5537 * the log recovery has been completed.
5538 */
5539 STATIC int
5540 xlog_do_log_recovery(
5541 struct xlog *log,
5542 xfs_daddr_t head_blk,
5543 xfs_daddr_t tail_blk)
5544 {
5545 int error, i;
5546
5547 ASSERT(head_blk != tail_blk);
5548
5549 /*
5550 * First do a pass to find all of the cancelled buf log items.
5551 * Store them in the buf_cancel_table for use in the second pass.
5552 */
5553 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5554 sizeof(struct list_head),
5555 KM_SLEEP);
5556 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5557 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5558
5559 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5560 XLOG_RECOVER_PASS1, NULL);
5561 if (error != 0) {
5562 kmem_free(log->l_buf_cancel_table);
5563 log->l_buf_cancel_table = NULL;
5564 return error;
5565 }
5566 /*
5567 * Then do a second pass to actually recover the items in the log.
5568 * When it is complete free the table of buf cancel items.
5569 */
5570 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5571 XLOG_RECOVER_PASS2, NULL);
5572 #ifdef DEBUG
5573 if (!error) {
5574 int i;
5575
5576 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5577 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5578 }
5579 #endif /* DEBUG */
5580
5581 kmem_free(log->l_buf_cancel_table);
5582 log->l_buf_cancel_table = NULL;
5583
5584 return error;
5585 }
5586
5587 /*
5588 * Do the actual recovery
5589 */
5590 STATIC int
5591 xlog_do_recover(
5592 struct xlog *log,
5593 xfs_daddr_t head_blk,
5594 xfs_daddr_t tail_blk)
5595 {
5596 struct xfs_mount *mp = log->l_mp;
5597 int error;
5598 xfs_buf_t *bp;
5599 xfs_sb_t *sbp;
5600
5601 /*
5602 * First replay the images in the log.
5603 */
5604 error = xlog_do_log_recovery(log, head_blk, tail_blk);
5605 if (error)
5606 return error;
5607
5608 /*
5609 * If IO errors happened during recovery, bail out.
5610 */
5611 if (XFS_FORCED_SHUTDOWN(mp)) {
5612 return -EIO;
5613 }
5614
5615 /*
5616 * We now update the tail_lsn since much of the recovery has completed
5617 * and there may be space available to use. If there were no extent
5618 * or iunlinks, we can free up the entire log and set the tail_lsn to
5619 * be the last_sync_lsn. This was set in xlog_find_tail to be the
5620 * lsn of the last known good LR on disk. If there are extent frees
5621 * or iunlinks they will have some entries in the AIL; so we look at
5622 * the AIL to determine how to set the tail_lsn.
5623 */
5624 xlog_assign_tail_lsn(mp);
5625
5626 /*
5627 * Now that we've finished replaying all buffer and inode
5628 * updates, re-read in the superblock and reverify it.
5629 */
5630 bp = xfs_getsb(mp, 0);
5631 bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5632 ASSERT(!(bp->b_flags & XBF_WRITE));
5633 bp->b_flags |= XBF_READ;
5634 bp->b_ops = &xfs_sb_buf_ops;
5635
5636 error = xfs_buf_submit_wait(bp);
5637 if (error) {
5638 if (!XFS_FORCED_SHUTDOWN(mp)) {
5639 xfs_buf_ioerror_alert(bp, __func__);
5640 ASSERT(0);
5641 }
5642 xfs_buf_relse(bp);
5643 return error;
5644 }
5645
5646 /* Convert superblock from on-disk format */
5647 sbp = &mp->m_sb;
5648 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5649 xfs_buf_relse(bp);
5650
5651 /* re-initialise in-core superblock and geometry structures */
5652 xfs_reinit_percpu_counters(mp);
5653 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5654 if (error) {
5655 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5656 return error;
5657 }
5658 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5659
5660 xlog_recover_check_summary(log);
5661
5662 /* Normal transactions can now occur */
5663 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5664 return 0;
5665 }
5666
5667 /*
5668 * Perform recovery and re-initialize some log variables in xlog_find_tail.
5669 *
5670 * Return error or zero.
5671 */
5672 int
5673 xlog_recover(
5674 struct xlog *log)
5675 {
5676 xfs_daddr_t head_blk, tail_blk;
5677 int error;
5678
5679 /* find the tail of the log */
5680 error = xlog_find_tail(log, &head_blk, &tail_blk);
5681 if (error)
5682 return error;
5683
5684 /*
5685 * The superblock was read before the log was available and thus the LSN
5686 * could not be verified. Check the superblock LSN against the current
5687 * LSN now that it's known.
5688 */
5689 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5690 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5691 return -EINVAL;
5692
5693 if (tail_blk != head_blk) {
5694 /* There used to be a comment here:
5695 *
5696 * disallow recovery on read-only mounts. note -- mount
5697 * checks for ENOSPC and turns it into an intelligent
5698 * error message.
5699 * ...but this is no longer true. Now, unless you specify
5700 * NORECOVERY (in which case this function would never be
5701 * called), we just go ahead and recover. We do this all
5702 * under the vfs layer, so we can get away with it unless
5703 * the device itself is read-only, in which case we fail.
5704 */
5705 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
5706 return error;
5707 }
5708
5709 /*
5710 * Version 5 superblock log feature mask validation. We know the
5711 * log is dirty so check if there are any unknown log features
5712 * in what we need to recover. If there are unknown features
5713 * (e.g. unsupported transactions, then simply reject the
5714 * attempt at recovery before touching anything.
5715 */
5716 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5717 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5718 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5719 xfs_warn(log->l_mp,
5720 "Superblock has unknown incompatible log features (0x%x) enabled.",
5721 (log->l_mp->m_sb.sb_features_log_incompat &
5722 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5723 xfs_warn(log->l_mp,
5724 "The log can not be fully and/or safely recovered by this kernel.");
5725 xfs_warn(log->l_mp,
5726 "Please recover the log on a kernel that supports the unknown features.");
5727 return -EINVAL;
5728 }
5729
5730 /*
5731 * Delay log recovery if the debug hook is set. This is debug
5732 * instrumention to coordinate simulation of I/O failures with
5733 * log recovery.
5734 */
5735 if (xfs_globals.log_recovery_delay) {
5736 xfs_notice(log->l_mp,
5737 "Delaying log recovery for %d seconds.",
5738 xfs_globals.log_recovery_delay);
5739 msleep(xfs_globals.log_recovery_delay * 1000);
5740 }
5741
5742 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5743 log->l_mp->m_logname ? log->l_mp->m_logname
5744 : "internal");
5745
5746 error = xlog_do_recover(log, head_blk, tail_blk);
5747 log->l_flags |= XLOG_RECOVERY_NEEDED;
5748 }
5749 return error;
5750 }
5751
5752 /*
5753 * In the first part of recovery we replay inodes and buffers and build
5754 * up the list of extent free items which need to be processed. Here
5755 * we process the extent free items and clean up the on disk unlinked
5756 * inode lists. This is separated from the first part of recovery so
5757 * that the root and real-time bitmap inodes can be read in from disk in
5758 * between the two stages. This is necessary so that we can free space
5759 * in the real-time portion of the file system.
5760 */
5761 int
5762 xlog_recover_finish(
5763 struct xlog *log)
5764 {
5765 /*
5766 * Now we're ready to do the transactions needed for the
5767 * rest of recovery. Start with completing all the extent
5768 * free intent records and then process the unlinked inode
5769 * lists. At this point, we essentially run in normal mode
5770 * except that we're still performing recovery actions
5771 * rather than accepting new requests.
5772 */
5773 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5774 int error;
5775 error = xlog_recover_process_intents(log);
5776 if (error) {
5777 xfs_alert(log->l_mp, "Failed to recover intents");
5778 return error;
5779 }
5780
5781 /*
5782 * Sync the log to get all the intents out of the AIL.
5783 * This isn't absolutely necessary, but it helps in
5784 * case the unlink transactions would have problems
5785 * pushing the intents out of the way.
5786 */
5787 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5788
5789 xlog_recover_process_iunlinks(log);
5790
5791 xlog_recover_check_summary(log);
5792
5793 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5794 log->l_mp->m_logname ? log->l_mp->m_logname
5795 : "internal");
5796 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5797 } else {
5798 xfs_info(log->l_mp, "Ending clean mount");
5799 }
5800 return 0;
5801 }
5802
5803 int
5804 xlog_recover_cancel(
5805 struct xlog *log)
5806 {
5807 int error = 0;
5808
5809 if (log->l_flags & XLOG_RECOVERY_NEEDED)
5810 error = xlog_recover_cancel_intents(log);
5811
5812 return error;
5813 }
5814
5815 #if defined(DEBUG)
5816 /*
5817 * Read all of the agf and agi counters and check that they
5818 * are consistent with the superblock counters.
5819 */
5820 void
5821 xlog_recover_check_summary(
5822 struct xlog *log)
5823 {
5824 xfs_mount_t *mp;
5825 xfs_agf_t *agfp;
5826 xfs_buf_t *agfbp;
5827 xfs_buf_t *agibp;
5828 xfs_agnumber_t agno;
5829 uint64_t freeblks;
5830 uint64_t itotal;
5831 uint64_t ifree;
5832 int error;
5833
5834 mp = log->l_mp;
5835
5836 freeblks = 0LL;
5837 itotal = 0LL;
5838 ifree = 0LL;
5839 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5840 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5841 if (error) {
5842 xfs_alert(mp, "%s agf read failed agno %d error %d",
5843 __func__, agno, error);
5844 } else {
5845 agfp = XFS_BUF_TO_AGF(agfbp);
5846 freeblks += be32_to_cpu(agfp->agf_freeblks) +
5847 be32_to_cpu(agfp->agf_flcount);
5848 xfs_buf_relse(agfbp);
5849 }
5850
5851 error = xfs_read_agi(mp, NULL, agno, &agibp);
5852 if (error) {
5853 xfs_alert(mp, "%s agi read failed agno %d error %d",
5854 __func__, agno, error);
5855 } else {
5856 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
5857
5858 itotal += be32_to_cpu(agi->agi_count);
5859 ifree += be32_to_cpu(agi->agi_freecount);
5860 xfs_buf_relse(agibp);
5861 }
5862 }
5863 }
5864 #endif /* DEBUG */