/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"

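/*
 * Integer midpoint of two block numbers, used by the binary searches
 * below; e.g. BLK_AVG(2, 9) == (2 + 9) >> 1 == 5.
 */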
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
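/*
 * For example, with 4k log sectors (l_sectBBsize == 8), a request for
 * nbblks == 10 becomes round_up(10 + 8, 8) == 24 basic blocks: padding
 * for whole sectors plus one extra sector for unaligned starting offsets.
 */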

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}
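/*
 * For example, with l_sectBBsize == 8 and blk_no == 13, the buffer was
 * read starting at the sector boundary (block 8), so block 13's data
 * lives 5 basic blocks (BBTOB(5) bytes) into the buffer.
 */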


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
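/*
 * Example: with cycles  9 9 9 8 8 8 8 8  in blocks 0-7 and cycle == 8,
 * first_blk = 0 and *last_blk = 7 converge as mid_blk probes block 3
 * (cycle 8, so end_blk = 3), block 1 (cycle 9, so first_blk = 1) and
 * block 2 (cycle 9, so first_blk = 2), leaving *last_blk = 3: the
 * first block of cycle 8.
 */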

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

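	/*
	 * For example, if the record header sits at block i == 100 with one
	 * header block (xhdrs == 1) and h_len covering 3 basic blocks, the
	 * record spans blocks 100-103 and the block after it is 104.  With
	 * extra_bblks == 0, *last_blk == 104 satisfies the check below and
	 * is left alone, while *last_blk == 102 points mid-record and is
	 * pulled back to block 100.
	 */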
	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
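/*
 * For example, seeking one record back from head_blk == 5 with
 * tail_blk == 900 in a 1000-block log scans blocks 4..0 first; if no
 * header magic is found there, the search wraps and scans blocks
 * 999..900 with *wrapped set to true.
 */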

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Check the log tail for torn writes. This is required when torn writes are
 * detected at the head and the head had to be walked back to a previous record.
 * The tail of the previous record must now be verified to ensure the torn
 * writes didn't corrupt the previous tail.
 *
 * Return an error if CRC verification fails as recovery cannot proceed.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			count;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_head;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
	 * a temporary head block that points after the last possible
	 * concurrently written record of the tail.
	 */
	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
				     &wrapped);
	if (count < 0) {
		error = count;
		goto out;
	}

	/*
	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
	 * into the actual log head. tmp_head points to the start of the record
	 * so update it to the actual head block.
	 */
	if (count < XLOG_MAX_ICLOGS + 1)
		tmp_head = head_blk;

	/*
	 * We now have a tail and temporary head block that covers at least
	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
	 * records were completely written. Run a CRC verification pass from
	 * tail to head and return the result.
	 */
	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	found = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp, rhead_blk,
				      rhead, wrapped);
	if (found < 0)
		return found;
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		return -EIO;
	}

	*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));

	/*
	 * Now that we have a tail block, check the head of the log for torn
	 * writes. Search again until we hit the tail or the maximum number of
	 * log record I/Os that could have been in flight at one time. Use a
	 * temporary buffer so we don't trash the rhead/bp pointer from the
	 * call above.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if (error == -EFSBADCRC) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}

		/*
		 * Now verify the tail based on the updated head. This is
		 * required because the torn writes trimmed from the head could
		 * have been written over the tail of a previous record. Return
		 * any errors since recovery cannot proceed if the tail is
		 * corrupt.
		 *
		 * XXX: This leaves a gap in truly robust protection from torn
		 * writes in the log. If the head is behind the tail, the tail
		 * pushes forward to create some space; if a crash then tears
		 * the writes into the previous record's tail region, log
		 * recovery isn't able to recover.
		 *
		 * How likely is this to occur? If possible, can we do something
		 * more intelligent here? Is it safe to push the tail forward if
		 * we can determine that the tail is within the range of the
		 * torn write (e.g., the kernel can only overwrite the tail if
		 * it has actually been pushed forward)? Alternatively, could we
		 * somehow prevent this condition at runtime?
		 */
		error = xlog_verify_tail(log, *head_blk, *tail_blk);
	}

	return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;
	bool			wrapped = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Trim the head block back to skip over torn records. We can have
	 * multiple log I/Os in flight at any time, so we assume CRC failures
	 * back through the previous several records are torn writes and skip
	 * them.
	 */
	ASSERT(*head_blk < INT_MAX);
	error = xlog_verify_head(log, head_blk, tail_blk, bp, &rhead_blk,
				 &rhead, &wrapped);
	if (error)
		goto done;

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (wrapped)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}
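	/*
	 * For example, with sectbb == 8 and start_block == 10, balign == 8
	 * and j == 2: blocks 8-9 are refilled from disk above so the first
	 * sector-aligned write below doesn't clobber them.
	 */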

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_put_bp:
	xlog_put_bp(bp);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}
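	/*
	 * For example, with l_logBBsize == 1000, head_block == 900 and
	 * tail_block == 100 in the same cycle, tail_distance is
	 * 100 + (1000 - 900) == 200 blocks of clearable space.
	 */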

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
1777 *
1778 * 3. Inode allocation buffers must be replayed before inode items that
1779 * read the buffer and replay changes into it. For filesystems using the
1780 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1781 * treated the same as inode allocation buffers as they create and
1782 * initialise the buffers directly.
1783 *
1784 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1785 * This ensures that inodes are completely flushed to the inode buffer
1786 * in a "free" state before we remove the unlinked inode list pointer.
1787 *
1788 * Hence the ordering needs to be inode allocation buffers first, inode items
1789 * second, inode unlink buffers third and cancelled buffers last.
1790 *
1791 * But there's a problem with that - we can't tell an inode allocation buffer
1792 * apart from a regular buffer, so we can't separate them. We can, however,
1793 * tell an inode unlink buffer from the others, and so we can separate them out
1794 * from all the other buffers and move them to last.
1795 *
1796 * Hence, 4 lists, in order from head to tail:
1797 * - buffer_list for all buffers except cancelled/inode unlink buffers
1798 * - inode_list for all non-buffer items
1799 * - inode_buffer_list for inode unlink buffers
1800 * - cancel_list for the cancelled buffers
1801 *
1802 * Note that we add objects to the tail of the lists so that first-to-last
1803 * ordering is preserved within the lists. Adding objects to the head of the
1804 * list means when we traverse from the head we walk them in last-to-first
1805 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1806 * but for all other items there may be specific ordering that we need to
1807 * preserve.
1808 */
1809 STATIC int
1810 xlog_recover_reorder_trans(
1811 struct xlog *log,
1812 struct xlog_recover *trans,
1813 int pass)
1814 {
1815 xlog_recover_item_t *item, *n;
1816 int error = 0;
1817 LIST_HEAD(sort_list);
1818 LIST_HEAD(cancel_list);
1819 LIST_HEAD(buffer_list);
1820 LIST_HEAD(inode_buffer_list);
1821 LIST_HEAD(inode_list);
1822
1823 list_splice_init(&trans->r_itemq, &sort_list);
1824 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1825 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1826
1827 switch (ITEM_TYPE(item)) {
1828 case XFS_LI_ICREATE:
1829 list_move_tail(&item->ri_list, &buffer_list);
1830 break;
1831 case XFS_LI_BUF:
1832 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1833 trace_xfs_log_recover_item_reorder_head(log,
1834 trans, item, pass);
1835 list_move(&item->ri_list, &cancel_list);
1836 break;
1837 }
1838 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1839 list_move(&item->ri_list, &inode_buffer_list);
1840 break;
1841 }
1842 list_move_tail(&item->ri_list, &buffer_list);
1843 break;
1844 case XFS_LI_INODE:
1845 case XFS_LI_DQUOT:
1846 case XFS_LI_QUOTAOFF:
1847 case XFS_LI_EFD:
1848 case XFS_LI_EFI:
1849 trace_xfs_log_recover_item_reorder_tail(log,
1850 trans, item, pass);
1851 list_move_tail(&item->ri_list, &inode_list);
1852 break;
1853 default:
1854 xfs_warn(log->l_mp,
1855 "%s: unrecognized type of log operation",
1856 __func__);
1857 ASSERT(0);
1858 /*
1859 * return the remaining items back to the transaction
1860 * item list so they can be freed by the caller.
1861 */
1862 if (!list_empty(&sort_list))
1863 list_splice_init(&sort_list, &trans->r_itemq);
1864 error = -EIO;
1865 goto out;
1866 }
1867 }
1868 out:
1869 ASSERT(list_empty(&sort_list));
1870 if (!list_empty(&buffer_list))
1871 list_splice(&buffer_list, &trans->r_itemq);
1872 if (!list_empty(&inode_list))
1873 list_splice_tail(&inode_list, &trans->r_itemq);
1874 if (!list_empty(&inode_buffer_list))
1875 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1876 if (!list_empty(&cancel_list))
1877 list_splice_tail(&cancel_list, &trans->r_itemq);
1878 return error;
1879 }
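/*
 * Worked example (added for clarity): a transaction logged as
 * [cancelled buf C, inode I, regular buf B, unlink buf U] leaves
 * xlog_recover_reorder_trans() with r_itemq ordered B -> I -> U -> C,
 * i.e. buffer_list first, then inode_list, then inode_buffer_list,
 * and cancel_list last, matching the replay ordering rules above.
 */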
1880
1881 /*
1882 * Build up the table of buf cancel records so that we don't replay
1883 * cancelled data in the second pass. For buffer records that are
1884 * not cancel records, there is nothing to do here so we just return.
1885 *
1886 * If we get a cancel record which is already in the table, this indicates
1887 * that the buffer was cancelled multiple times. In order to ensure
1888 * that during pass 2 we keep the record in the table until we reach its
1889 * last occurrence in the log, we keep a reference count in the cancel
1890 * record in the table to tell us how many times we expect to see this
1891 * record during the second pass.
1892 */
1893 STATIC int
1894 xlog_recover_buffer_pass1(
1895 struct xlog *log,
1896 struct xlog_recover_item *item)
1897 {
1898 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1899 struct list_head *bucket;
1900 struct xfs_buf_cancel *bcp;
1901
1902 /*
1903 * If this isn't a cancel buffer item, then just return.
1904 */
1905 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1906 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1907 return 0;
1908 }
1909
1910 /*
1911 * Insert an xfs_buf_cancel record into the hash table.
1912 * If there is already an identical record, bump its reference count.
1913 */
1914 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1915 list_for_each_entry(bcp, bucket, bc_list) {
1916 if (bcp->bc_blkno == buf_f->blf_blkno &&
1917 bcp->bc_len == buf_f->blf_len) {
1918 bcp->bc_refcount++;
1919 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1920 return 0;
1921 }
1922 }
1923
1924 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1925 bcp->bc_blkno = buf_f->blf_blkno;
1926 bcp->bc_len = buf_f->blf_len;
1927 bcp->bc_refcount = 1;
1928 list_add_tail(&bcp->bc_list, bucket);
1929
1930 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1931 return 0;
1932 }
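/*
 * Worked example (added for clarity): if the same buffer is cancelled
 * twice in the log, pass 1 leaves its table entry with bc_refcount == 2.
 * Each pass 2 encounter of the cancel item decrements the count in
 * xlog_check_buffer_cancelled() below, and the entry is freed on the
 * last decrement so that any later reuse of those blocks is replayed
 * normally.
 */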
1933
1934 /*
1935 * Check to see whether the buffer being recovered has a corresponding
1936 * entry in the buffer cancel record table. If it does, return the cancel
1937 * buffer structure to the caller.
1938 */
1939 STATIC struct xfs_buf_cancel *
1940 xlog_peek_buffer_cancelled(
1941 struct xlog *log,
1942 xfs_daddr_t blkno,
1943 uint len,
1944 ushort flags)
1945 {
1946 struct list_head *bucket;
1947 struct xfs_buf_cancel *bcp;
1948
1949 if (!log->l_buf_cancel_table) {
1950 /* empty table means no cancelled buffers in the log */
1951 ASSERT(!(flags & XFS_BLF_CANCEL));
1952 return NULL;
1953 }
1954
1955 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1956 list_for_each_entry(bcp, bucket, bc_list) {
1957 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1958 return bcp;
1959 }
1960
1961 /*
1962 * We didn't find a corresponding entry in the table, so return NULL
1963 * so that the buffer is NOT cancelled.
1964 */
1965 ASSERT(!(flags & XFS_BLF_CANCEL));
1966 return NULL;
1967 }
1968
1969 /*
1970 * If the buffer is being cancelled then return 1 so that it will be cancelled,
1971 * otherwise return 0. If the buffer is actually a buffer cancel item
1972 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
1973 * table and remove it from the table if this is the last reference.
1974 *
1975 * We remove the cancel record from the table when we encounter its last
1976 * occurrence in the log so that if the same buffer is re-used again after its
1977 * last cancellation we actually replay the changes made at that point.
1978 */
1979 STATIC int
1980 xlog_check_buffer_cancelled(
1981 struct xlog *log,
1982 xfs_daddr_t blkno,
1983 uint len,
1984 ushort flags)
1985 {
1986 struct xfs_buf_cancel *bcp;
1987
1988 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
1989 if (!bcp)
1990 return 0;
1991
1992 /*
1993 * We've got a match, so return 1 so that the recovery of this buffer
1994 * is cancelled. If this buffer is actually a buffer cancel log
1995 * item, then decrement the refcount on the one in the table and
1996 * remove it if this is the last reference.
1997 */
1998 if (flags & XFS_BLF_CANCEL) {
1999 if (--bcp->bc_refcount == 0) {
2000 list_del(&bcp->bc_list);
2001 kmem_free(bcp);
2002 }
2003 }
2004 return 1;
2005 }
2006
2007 /*
2008 * Perform recovery for a buffer full of inodes. In these buffers, the only
2009 * data which should be recovered is that which corresponds to the
2010 * di_next_unlinked pointers in the on disk inode structures. The rest of the
2011 * data for the inodes is always logged through the inodes themselves rather
2012 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2013 *
2014 * The only time when buffers full of inodes are fully recovered is when the
2015 * buffer is full of newly allocated inodes. In this case the buffer will
2016 * not be marked as an inode buffer and so will be sent to
2017 * xlog_recover_do_reg_buffer() below during recovery.
2018 */
2019 STATIC int
2020 xlog_recover_do_inode_buffer(
2021 struct xfs_mount *mp,
2022 xlog_recover_item_t *item,
2023 struct xfs_buf *bp,
2024 xfs_buf_log_format_t *buf_f)
2025 {
2026 int i;
2027 int item_index = 0;
2028 int bit = 0;
2029 int nbits = 0;
2030 int reg_buf_offset = 0;
2031 int reg_buf_bytes = 0;
2032 int next_unlinked_offset;
2033 int inodes_per_buf;
2034 xfs_agino_t *logged_nextp;
2035 xfs_agino_t *buffer_nextp;
2036
2037 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2038
2039 /*
2040 * Post recovery validation only works properly on CRC enabled
2041 * filesystems.
2042 */
2043 if (xfs_sb_version_hascrc(&mp->m_sb))
2044 bp->b_ops = &xfs_inode_buf_ops;
2045
2046 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2047 for (i = 0; i < inodes_per_buf; i++) {
2048 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2049 offsetof(xfs_dinode_t, di_next_unlinked);
2050
2051 while (next_unlinked_offset >=
2052 (reg_buf_offset + reg_buf_bytes)) {
2053 /*
2054 * The next di_next_unlinked field is beyond
2055 * the current logged region. Find the next
2056 * logged region that contains or is beyond
2057 * the current di_next_unlinked field.
2058 */
2059 bit += nbits;
2060 bit = xfs_next_bit(buf_f->blf_data_map,
2061 buf_f->blf_map_size, bit);
2062
2063 /*
2064 * If there are no more logged regions in the
2065 * buffer, then we're done.
2066 */
2067 if (bit == -1)
2068 return 0;
2069
2070 nbits = xfs_contig_bits(buf_f->blf_data_map,
2071 buf_f->blf_map_size, bit);
2072 ASSERT(nbits > 0);
2073 reg_buf_offset = bit << XFS_BLF_SHIFT;
2074 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2075 item_index++;
2076 }
2077
2078 /*
2079 * If the current logged region starts after the current
2080 * di_next_unlinked field, then move on to the next
2081 * di_next_unlinked field.
2082 */
2083 if (next_unlinked_offset < reg_buf_offset)
2084 continue;
2085
2086 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2087 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2088 ASSERT((reg_buf_offset + reg_buf_bytes) <=
2089 BBTOB(bp->b_io_length));
2090
2091 /*
2092 * The current logged region contains a copy of the
2093 * current di_next_unlinked field. Extract its value
2094 * and copy it to the buffer copy.
2095 */
2096 logged_nextp = item->ri_buf[item_index].i_addr +
2097 next_unlinked_offset - reg_buf_offset;
2098 if (unlikely(*logged_nextp == 0)) {
2099 xfs_alert(mp,
2100 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
2101 "Trying to replay bad (0) inode di_next_unlinked field.",
2102 item, bp);
2103 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2104 XFS_ERRLEVEL_LOW, mp);
2105 return -EFSCORRUPTED;
2106 }
2107
2108 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2109 *buffer_nextp = *logged_nextp;
2110
2111 /*
2112 * If necessary, recalculate the CRC in the on-disk inode. We
2113 * have to leave the inode in a consistent state for whoever
2114 * reads it next....
2115 */
2116 xfs_dinode_calc_crc(mp,
2117 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2118
2119 }
2120
2121 return 0;
2122 }
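/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): each bit in blf_data_map covers one XFS_BLF_CHUNK
 * (1 << XFS_BLF_SHIFT bytes) of the buffer, so a run of set bits maps
 * to a byte range as below. Assuming the usual 128 byte chunk, bit 2
 * with nbits 3 describes bytes 256..639 of the buffer.
 */
static inline void
xlog_blf_region_sketch(
	int	bit,
	int	nbits,
	int	*reg_offset,		/* byte offset of logged region */
	int	*reg_bytes)		/* byte length of logged region */
{
	*reg_offset = bit << XFS_BLF_SHIFT;
	*reg_bytes = nbits << XFS_BLF_SHIFT;
}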
2123
2124 /*
2125 * V5 filesystems know the age of the buffer on disk being recovered. We can
2126 * have newer objects on disk than we are replaying, and so for these cases we
2127 * don't want to replay the current change as that will make the buffer contents
2128 * temporarily invalid on disk.
2129 *
2130 * The magic number might not match the buffer type we are going to recover
2131 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
2132 * extract the LSN of the existing object in the buffer based on its current
2133 * magic number. If we don't recognise the magic number in the buffer, then
2134 * return an LSN of -1 so that the caller knows it was an unrecognised block and
2135 * so can recover the buffer.
2136 *
2137 * Note: we cannot rely solely on magic number matches to determine that the
2138 * buffer has a valid LSN - we also need to verify that it belongs to this
2139 * filesystem, so we need to extract the object's LSN and compare it to that
2140 * which we read from the superblock. If the UUIDs don't match, then we've got a
2141 * stale metadata block from an old filesystem instance that we need to recover
2142 * over the top of.
2143 */
2144 static xfs_lsn_t
2145 xlog_recover_get_buf_lsn(
2146 struct xfs_mount *mp,
2147 struct xfs_buf *bp)
2148 {
2149 __uint32_t magic32;
2150 __uint16_t magic16;
2151 __uint16_t magicda;
2152 void *blk = bp->b_addr;
2153 uuid_t *uuid;
2154 xfs_lsn_t lsn = -1;
2155
2156 /* v4 filesystems always recover immediately */
2157 if (!xfs_sb_version_hascrc(&mp->m_sb))
2158 goto recover_immediately;
2159
2160 magic32 = be32_to_cpu(*(__be32 *)blk);
2161 switch (magic32) {
2162 case XFS_ABTB_CRC_MAGIC:
2163 case XFS_ABTC_CRC_MAGIC:
2164 case XFS_ABTB_MAGIC:
2165 case XFS_ABTC_MAGIC:
2166 case XFS_IBT_CRC_MAGIC:
2167 case XFS_IBT_MAGIC: {
2168 struct xfs_btree_block *btb = blk;
2169
2170 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2171 uuid = &btb->bb_u.s.bb_uuid;
2172 break;
2173 }
2174 case XFS_BMAP_CRC_MAGIC:
2175 case XFS_BMAP_MAGIC: {
2176 struct xfs_btree_block *btb = blk;
2177
2178 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2179 uuid = &btb->bb_u.l.bb_uuid;
2180 break;
2181 }
2182 case XFS_AGF_MAGIC:
2183 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2184 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2185 break;
2186 case XFS_AGFL_MAGIC:
2187 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2188 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2189 break;
2190 case XFS_AGI_MAGIC:
2191 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2192 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2193 break;
2194 case XFS_SYMLINK_MAGIC:
2195 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2196 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2197 break;
2198 case XFS_DIR3_BLOCK_MAGIC:
2199 case XFS_DIR3_DATA_MAGIC:
2200 case XFS_DIR3_FREE_MAGIC:
2201 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2202 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2203 break;
2204 case XFS_ATTR3_RMT_MAGIC:
2205 /*
2206 * Remote attr blocks are written synchronously, rather than
2207 * being logged. That means they do not contain a valid LSN
2208 * (i.e. transactionally ordered) in them, and hence any time we
2209 * see a buffer to replay over the top of a remote attribute
2210 * block we should simply do so.
2211 */
2212 goto recover_immediately;
2213 case XFS_SB_MAGIC:
2214 /*
2215 * superblock uuids are magic. We may or may not have a
2216 * sb_meta_uuid on disk, but it will be set in the in-core
2217 * superblock. We set the uuid pointer for verification
2218 * according to the superblock feature mask to ensure we check
2219 * the relevant UUID in the superblock.
2220 */
2221 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2222 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2223 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2224 else
2225 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2226 break;
2227 default:
2228 break;
2229 }
2230
2231 if (lsn != (xfs_lsn_t)-1) {
2232 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2233 goto recover_immediately;
2234 return lsn;
2235 }
2236
2237 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2238 switch (magicda) {
2239 case XFS_DIR3_LEAF1_MAGIC:
2240 case XFS_DIR3_LEAFN_MAGIC:
2241 case XFS_DA3_NODE_MAGIC:
2242 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2243 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2244 break;
2245 default:
2246 break;
2247 }
2248
2249 if (lsn != (xfs_lsn_t)-1) {
2250 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2251 goto recover_immediately;
2252 return lsn;
2253 }
2254
2255 /*
2256 * We do individual object checks on dquot and inode buffers as they
2257 * have their own individual LSN records. Also, we could have a stale
2258 * buffer here, so we have to at least recognise these buffer types.
2259 *
2260 * A noted complexity here is inode unlinked list processing - it logs
2261 * the inode directly in the buffer, but we don't know which inodes have
2262 * been modified, and there is no global buffer LSN. Hence we need to
2263 * recover all inode buffer types immediately. This problem will be
2264 * fixed by logical logging of the unlinked list modifications.
2265 */
2266 magic16 = be16_to_cpu(*(__be16 *)blk);
2267 switch (magic16) {
2268 case XFS_DQUOT_MAGIC:
2269 case XFS_DINODE_MAGIC:
2270 goto recover_immediately;
2271 default:
2272 break;
2273 }
2274
2275 /* unknown buffer contents, recover immediately */
2276
2277 recover_immediately:
2278 return (xfs_lsn_t)-1;
2279
2280 }
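/*
 * Decision summary (added for clarity): a recognised 32 bit magic
 * yields a block LSN verified against sb_meta_uuid; a recognised
 * da-block magic yields an LSN verified against sb_uuid; dquot and
 * dinode 16 bit magics recover immediately as those objects carry
 * their own LSNs and are checked individually during replay; anything
 * unrecognised also recovers immediately via the -1 return.
 */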
2281
2282 /*
2283 * Validate the recovered buffer is of the correct type and attach the
2284 * appropriate buffer operations to it for writeback. Magic numbers are in a
2285 * few places:
2286 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2287 * the first 32 bits of the buffer (most blocks),
2288 * inside a struct xfs_da_blkinfo at the start of the buffer.
2289 */
2290 static void
2291 xlog_recover_validate_buf_type(
2292 struct xfs_mount *mp,
2293 struct xfs_buf *bp,
2294 xfs_buf_log_format_t *buf_f)
2295 {
2296 struct xfs_da_blkinfo *info = bp->b_addr;
2297 __uint32_t magic32;
2298 __uint16_t magic16;
2299 __uint16_t magicda;
2300
2301 /*
2302 * We can only do post recovery validation on items on CRC enabled
2303 * filesystems as we need to know when the buffer was written to be able
2304 * to determine if we should have replayed the item. If we replay old
2305 * metadata over a newer buffer, then it will enter a temporarily
2306 * inconsistent state resulting in verification failures. Hence for now
2307 * just avoid the verification stage for non-crc filesystems
2308 */
2309 if (!xfs_sb_version_hascrc(&mp->m_sb))
2310 return;
2311
2312 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2313 magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2314 magicda = be16_to_cpu(info->magic);
2315 switch (xfs_blft_from_flags(buf_f)) {
2316 case XFS_BLFT_BTREE_BUF:
2317 switch (magic32) {
2318 case XFS_ABTB_CRC_MAGIC:
2319 case XFS_ABTC_CRC_MAGIC:
2320 case XFS_ABTB_MAGIC:
2321 case XFS_ABTC_MAGIC:
2322 bp->b_ops = &xfs_allocbt_buf_ops;
2323 break;
2324 case XFS_IBT_CRC_MAGIC:
2325 case XFS_FIBT_CRC_MAGIC:
2326 case XFS_IBT_MAGIC:
2327 case XFS_FIBT_MAGIC:
2328 bp->b_ops = &xfs_inobt_buf_ops;
2329 break;
2330 case XFS_BMAP_CRC_MAGIC:
2331 case XFS_BMAP_MAGIC:
2332 bp->b_ops = &xfs_bmbt_buf_ops;
2333 break;
2334 default:
2335 xfs_warn(mp, "Bad btree block magic!");
2336 ASSERT(0);
2337 break;
2338 }
2339 break;
2340 case XFS_BLFT_AGF_BUF:
2341 if (magic32 != XFS_AGF_MAGIC) {
2342 xfs_warn(mp, "Bad AGF block magic!");
2343 ASSERT(0);
2344 break;
2345 }
2346 bp->b_ops = &xfs_agf_buf_ops;
2347 break;
2348 case XFS_BLFT_AGFL_BUF:
2349 if (magic32 != XFS_AGFL_MAGIC) {
2350 xfs_warn(mp, "Bad AGFL block magic!");
2351 ASSERT(0);
2352 break;
2353 }
2354 bp->b_ops = &xfs_agfl_buf_ops;
2355 break;
2356 case XFS_BLFT_AGI_BUF:
2357 if (magic32 != XFS_AGI_MAGIC) {
2358 xfs_warn(mp, "Bad AGI block magic!");
2359 ASSERT(0);
2360 break;
2361 }
2362 bp->b_ops = &xfs_agi_buf_ops;
2363 break;
2364 case XFS_BLFT_UDQUOT_BUF:
2365 case XFS_BLFT_PDQUOT_BUF:
2366 case XFS_BLFT_GDQUOT_BUF:
2367 #ifdef CONFIG_XFS_QUOTA
2368 if (magic16 != XFS_DQUOT_MAGIC) {
2369 xfs_warn(mp, "Bad DQUOT block magic!");
2370 ASSERT(0);
2371 break;
2372 }
2373 bp->b_ops = &xfs_dquot_buf_ops;
2374 #else
2375 xfs_alert(mp,
2376 "Trying to recover dquots without QUOTA support built in!");
2377 ASSERT(0);
2378 #endif
2379 break;
2380 case XFS_BLFT_DINO_BUF:
2381 if (magic16 != XFS_DINODE_MAGIC) {
2382 xfs_warn(mp, "Bad INODE block magic!");
2383 ASSERT(0);
2384 break;
2385 }
2386 bp->b_ops = &xfs_inode_buf_ops;
2387 break;
2388 case XFS_BLFT_SYMLINK_BUF:
2389 if (magic32 != XFS_SYMLINK_MAGIC) {
2390 xfs_warn(mp, "Bad symlink block magic!");
2391 ASSERT(0);
2392 break;
2393 }
2394 bp->b_ops = &xfs_symlink_buf_ops;
2395 break;
2396 case XFS_BLFT_DIR_BLOCK_BUF:
2397 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2398 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2399 xfs_warn(mp, "Bad dir block magic!");
2400 ASSERT(0);
2401 break;
2402 }
2403 bp->b_ops = &xfs_dir3_block_buf_ops;
2404 break;
2405 case XFS_BLFT_DIR_DATA_BUF:
2406 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2407 magic32 != XFS_DIR3_DATA_MAGIC) {
2408 xfs_warn(mp, "Bad dir data magic!");
2409 ASSERT(0);
2410 break;
2411 }
2412 bp->b_ops = &xfs_dir3_data_buf_ops;
2413 break;
2414 case XFS_BLFT_DIR_FREE_BUF:
2415 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2416 magic32 != XFS_DIR3_FREE_MAGIC) {
2417 xfs_warn(mp, "Bad dir3 free magic!");
2418 ASSERT(0);
2419 break;
2420 }
2421 bp->b_ops = &xfs_dir3_free_buf_ops;
2422 break;
2423 case XFS_BLFT_DIR_LEAF1_BUF:
2424 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2425 magicda != XFS_DIR3_LEAF1_MAGIC) {
2426 xfs_warn(mp, "Bad dir leaf1 magic!");
2427 ASSERT(0);
2428 break;
2429 }
2430 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2431 break;
2432 case XFS_BLFT_DIR_LEAFN_BUF:
2433 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2434 magicda != XFS_DIR3_LEAFN_MAGIC) {
2435 xfs_warn(mp, "Bad dir leafn magic!");
2436 ASSERT(0);
2437 break;
2438 }
2439 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2440 break;
2441 case XFS_BLFT_DA_NODE_BUF:
2442 if (magicda != XFS_DA_NODE_MAGIC &&
2443 magicda != XFS_DA3_NODE_MAGIC) {
2444 xfs_warn(mp, "Bad da node magic!");
2445 ASSERT(0);
2446 break;
2447 }
2448 bp->b_ops = &xfs_da3_node_buf_ops;
2449 break;
2450 case XFS_BLFT_ATTR_LEAF_BUF:
2451 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2452 magicda != XFS_ATTR3_LEAF_MAGIC) {
2453 xfs_warn(mp, "Bad attr leaf magic!");
2454 ASSERT(0);
2455 break;
2456 }
2457 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2458 break;
2459 case XFS_BLFT_ATTR_RMT_BUF:
2460 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2461 xfs_warn(mp, "Bad attr remote magic!");
2462 ASSERT(0);
2463 break;
2464 }
2465 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2466 break;
2467 case XFS_BLFT_SB_BUF:
2468 if (magic32 != XFS_SB_MAGIC) {
2469 xfs_warn(mp, "Bad SB block magic!");
2470 ASSERT(0);
2471 break;
2472 }
2473 bp->b_ops = &xfs_sb_buf_ops;
2474 break;
2475 default:
2476 xfs_warn(mp, "Unknown buffer type %d!",
2477 xfs_blft_from_flags(buf_f));
2478 break;
2479 }
2480 }
2481
2482 /*
2483 * Perform a 'normal' buffer recovery. Each logged region of the
2484 * buffer should be copied over the corresponding region in the
2485 * given buffer. The bitmap in the buf log format structure indicates
2486 * where to place the logged data.
2487 */
2488 STATIC void
2489 xlog_recover_do_reg_buffer(
2490 struct xfs_mount *mp,
2491 xlog_recover_item_t *item,
2492 struct xfs_buf *bp,
2493 xfs_buf_log_format_t *buf_f)
2494 {
2495 int i;
2496 int bit;
2497 int nbits;
2498 int error;
2499
2500 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2501
2502 bit = 0;
2503 i = 1; /* 0 is the buf format structure */
2504 while (1) {
2505 bit = xfs_next_bit(buf_f->blf_data_map,
2506 buf_f->blf_map_size, bit);
2507 if (bit == -1)
2508 break;
2509 nbits = xfs_contig_bits(buf_f->blf_data_map,
2510 buf_f->blf_map_size, bit);
2511 ASSERT(nbits > 0);
2512 ASSERT(item->ri_buf[i].i_addr != NULL);
2513 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2514 ASSERT(BBTOB(bp->b_io_length) >=
2515 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2516
2517 /*
2518 * The dirty regions logged in the buffer, even though
2519 * contiguous, may span multiple chunks. This is because the
2520 * dirty region may span a physical page boundary in a buffer
2521 * and hence be split into two separate vectors for writing into
2522 * the log. Hence we need to trim nbits back to the length of
2523 * the current region being copied out of the log.
2524 */
2525 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2526 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2527
2528 /*
2529 * Do a sanity check if this is a dquot buffer. Just checking
2530 * the first dquot in the buffer should do. XXX: This is
2531 * probably a good thing to do for other buf types also.
2532 */
2533 error = 0;
2534 if (buf_f->blf_flags &
2535 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2536 if (item->ri_buf[i].i_addr == NULL) {
2537 xfs_alert(mp,
2538 "XFS: NULL dquot in %s.", __func__);
2539 goto next;
2540 }
2541 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2542 xfs_alert(mp,
2543 "XFS: dquot too small (%d) in %s.",
2544 item->ri_buf[i].i_len, __func__);
2545 goto next;
2546 }
2547 error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2548 -1, 0, XFS_QMOPT_DOWARN,
2549 "dquot_buf_recover");
2550 if (error)
2551 goto next;
2552 }
2553
2554 memcpy(xfs_buf_offset(bp,
2555 (uint)bit << XFS_BLF_SHIFT), /* dest */
2556 item->ri_buf[i].i_addr, /* source */
2557 nbits<<XFS_BLF_SHIFT); /* length */
2558 next:
2559 i++;
2560 bit += nbits;
2561 }
2562
2563 /* Shouldn't be any more regions */
2564 ASSERT(i == item->ri_total);
2565
2566 xlog_recover_validate_buf_type(mp, bp, buf_f);
2567 }
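/*
 * Worked example (added for clarity): a 512 byte dirty region that
 * straddles a page boundary may be logged as two vectors of, say, 384
 * and 128 bytes. The bitmap still shows one contiguous run of bits, so
 * the trimming above copies only ri_buf[i].i_len bytes from the first
 * vector and picks up the remaining chunk from the next vector on the
 * following loop iteration.
 */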
2568
2569 /*
2570 * Perform a dquot buffer recovery.
2571 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2572 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2573 * Else, treat it as a regular buffer and do recovery.
2574 *
2575 * Return false if the buffer was tossed and true if we recovered the buffer to
2576 * indicate to the caller if the buffer needs writing.
2577 */
2578 STATIC bool
2579 xlog_recover_do_dquot_buffer(
2580 struct xfs_mount *mp,
2581 struct xlog *log,
2582 struct xlog_recover_item *item,
2583 struct xfs_buf *bp,
2584 struct xfs_buf_log_format *buf_f)
2585 {
2586 uint type;
2587
2588 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2589
2590 /*
2591 * Filesystems are required to send in quota flags at mount time.
2592 */
2593 if (!mp->m_qflags)
2594 return false;
2595
2596 type = 0;
2597 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2598 type |= XFS_DQ_USER;
2599 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2600 type |= XFS_DQ_PROJ;
2601 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2602 type |= XFS_DQ_GROUP;
2603 /*
2604 * This type of quota was turned off, so ignore this buffer
2605 */
2606 if (log->l_quotaoffs_flag & type)
2607 return false;
2608
2609 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2610 return true;
2611 }
2612
2613 /*
2614 * This routine replays a modification made to a buffer at runtime.
2615 * There are actually two types of buffer, regular and inode, which
2616 * are handled differently. For inode buffers we only recover a
2617 * specific set of data, namely
2618 * the inode di_next_unlinked fields. This is because all other inode
2619 * data is actually logged via inode records and any data we replay
2620 * here which overlaps that may be stale.
2621 *
2622 * When meta-data buffers are freed at run time we log a buffer item
2623 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2624 * of the buffer in the log should not be replayed at recovery time.
2625 * This is so that if the blocks covered by the buffer are reused for
2626 * file data before we crash we don't end up replaying old, freed
2627 * meta-data into a user's file.
2628 *
2629 * To handle the cancellation of buffer log items, we make two passes
2630 * over the log during recovery. During the first we build a table of
2631 * those buffers which have been cancelled, and during the second we
2632 * only replay those buffers which do not have corresponding cancel
2633 * records in the table. See xlog_recover_buffer_pass[1,2] above
2634 * for more details on the implementation of the table of cancel records.
2635 */
2636 STATIC int
2637 xlog_recover_buffer_pass2(
2638 struct xlog *log,
2639 struct list_head *buffer_list,
2640 struct xlog_recover_item *item,
2641 xfs_lsn_t current_lsn)
2642 {
2643 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2644 xfs_mount_t *mp = log->l_mp;
2645 xfs_buf_t *bp;
2646 int error;
2647 uint buf_flags;
2648 xfs_lsn_t lsn;
2649
2650 /*
2651 * In this pass we only want to recover all the buffers which have
2652 * not been cancelled and are not cancellation buffers themselves.
2653 */
2654 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2655 buf_f->blf_len, buf_f->blf_flags)) {
2656 trace_xfs_log_recover_buf_cancel(log, buf_f);
2657 return 0;
2658 }
2659
2660 trace_xfs_log_recover_buf_recover(log, buf_f);
2661
2662 buf_flags = 0;
2663 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2664 buf_flags |= XBF_UNMAPPED;
2665
2666 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2667 buf_flags, NULL);
2668 if (!bp)
2669 return -ENOMEM;
2670 error = bp->b_error;
2671 if (error) {
2672 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2673 goto out_release;
2674 }
2675
2676 /*
2677 * Recover the buffer only if we get an LSN from it and it's less than
2678 * the lsn of the transaction we are replaying.
2679 *
2680 * Note that we have to be extremely careful of readahead here.
2681 * Readahead does not attach verifiers to the buffers, so if we don't
2682 * actually do any replay after readahead because the LSN found in the
2683 * buffer is more recent than the current transaction, then we need to
2684 * attach the verifier directly. Failure to do so means future recovery
2685 * actions (e.g. EFI and unlinked list recovery) will operate on
2686 * buffers that never had a verifier attached. This
2687 * can leave blocks on disk having the correct content but a stale
2688 * CRC.
2689 *
2690 * It is safe to assume these clean buffers are currently up to date.
2691 * If the buffer is dirtied by a later transaction being replayed, then
2692 * the verifier will be reset to match whatever recovery turns that
2693 * buffer into.
2694 */
2695 lsn = xlog_recover_get_buf_lsn(mp, bp);
2696 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2697 xlog_recover_validate_buf_type(mp, bp, buf_f);
2698 goto out_release;
2699 }
2700
2701 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2702 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2703 if (error)
2704 goto out_release;
2705 } else if (buf_f->blf_flags &
2706 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2707 bool dirty;
2708
2709 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2710 if (!dirty)
2711 goto out_release;
2712 } else {
2713 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2714 }
2715
2716 /*
2717 * Perform delayed write on the buffer. Asynchronous writes will be
2718 * slower when taking into account all the buffers to be flushed.
2719 *
2720 * Also make sure that only inode buffers with good sizes stay in
2721 * the buffer cache. The kernel moves inodes in buffers of 1 block
2722 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode
2723 * buffers in the log can be a different size if the log was generated
2724 * by an older kernel using unclustered inode buffers or a newer kernel
2725 * running with a different inode cluster size. Regardless, if the
2726 * inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2727 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2728 * the buffer out of the buffer cache so that the buffer won't
2729 * overlap with future reads of those inodes.
2730 */
2731 if (XFS_DINODE_MAGIC ==
2732 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2733 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2734 (__uint32_t)log->l_mp->m_inode_cluster_size))) {
2735 xfs_buf_stale(bp);
2736 error = xfs_bwrite(bp);
2737 } else {
2738 ASSERT(bp->b_target->bt_mount == mp);
2739 bp->b_iodone = xlog_recover_iodone;
2740 xfs_buf_delwri_queue(bp, buffer_list);
2741 }
2742
2743 out_release:
2744 xfs_buf_relse(bp);
2745 return error;
2746 }
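/*
 * Worked example (added for clarity): with a 4k block size and an 8k
 * mp->m_inode_cluster_size, any recovered inode buffer that is not
 * exactly 8k (e.g. a 4k unclustered buffer from an old kernel's log)
 * is written synchronously and marked stale above so it cannot alias
 * a later 8k cached read of the same inodes.
 */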
2747
2748 /*
2749 * Inode fork owner changes
2750 *
2751 * If we have been told that we have to reparent the inode fork, it's because an
2752 * extent swap operation on a CRC enabled filesystem has been done and we are
2753 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2754 * owners of it.
2755 *
2756 * The complexity here is that we don't have an inode context to work with, so
2757 * after we've replayed the inode we need to instantiate one. This is where the
2758 * fun begins.
2759 *
2760 * We are in the middle of log recovery, so we can't run transactions. That
2761 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2762 * that will result in the corresponding iput() running the inode through
2763 * xfs_inactive(). If we've just replayed an inode core that changes the link
2764 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2765 * transactions (bad!).
2766 *
2767 * So, to avoid this, we instantiate an inode directly from the inode core we've
2768 * just recovered. We have the buffer still locked, and all we really need to
2769 * instantiate is the inode core and the forks being modified. We can do this
2770 * manually, then run the inode btree owner change, and then tear down the
2771 * xfs_inode without having to run any transactions at all.
2772 *
2773 * Also, because we don't have a transaction context available here but need
2774 * to gather all the buffers we modify for writeback, we pass the buffer_list
2775 * to the operation to use instead.
2776 */
2777
2778 STATIC int
2779 xfs_recover_inode_owner_change(
2780 struct xfs_mount *mp,
2781 struct xfs_dinode *dip,
2782 struct xfs_inode_log_format *in_f,
2783 struct list_head *buffer_list)
2784 {
2785 struct xfs_inode *ip;
2786 int error;
2787
2788 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2789
2790 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2791 if (!ip)
2792 return -ENOMEM;
2793
2794 /* instantiate the inode */
2795 xfs_dinode_from_disk(&ip->i_d, dip);
2796 ASSERT(ip->i_d.di_version >= 3);
2797
2798 error = xfs_iformat_fork(ip, dip);
2799 if (error)
2800 goto out_free_ip;
2801
2802
2803 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2804 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2805 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2806 ip->i_ino, buffer_list);
2807 if (error)
2808 goto out_free_ip;
2809 }
2810
2811 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2812 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2813 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2814 ip->i_ino, buffer_list);
2815 if (error)
2816 goto out_free_ip;
2817 }
2818
2819 out_free_ip:
2820 xfs_inode_free(ip);
2821 return error;
2822 }
2823
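/*
 * Layout note (added for clarity): for an inode item, ri_buf[0] holds
 * the inode log format, ri_buf[1] the logged inode core, and ri_buf[2]
 * and ri_buf[3] the data and/or attr fork regions. Hence an ilf_size
 * of 2 below means "core only", 3 means the core plus one fork, and 4
 * means the core plus both forks.
 */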
2824 STATIC int
2825 xlog_recover_inode_pass2(
2826 struct xlog *log,
2827 struct list_head *buffer_list,
2828 struct xlog_recover_item *item,
2829 xfs_lsn_t current_lsn)
2830 {
2831 xfs_inode_log_format_t *in_f;
2832 xfs_mount_t *mp = log->l_mp;
2833 xfs_buf_t *bp;
2834 xfs_dinode_t *dip;
2835 int len;
2836 char *src;
2837 char *dest;
2838 int error;
2839 int attr_index;
2840 uint fields;
2841 xfs_icdinode_t *dicp;
2842 uint isize;
2843 int need_free = 0;
2844
2845 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2846 in_f = item->ri_buf[0].i_addr;
2847 } else {
2848 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2849 need_free = 1;
2850 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2851 if (error)
2852 goto error;
2853 }
2854
2855 /*
2856 * Inode buffers can be freed; look out for that case
2857 * and do not replay the inode.
2858 */
2859 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2860 in_f->ilf_len, 0)) {
2861 error = 0;
2862 trace_xfs_log_recover_inode_cancel(log, in_f);
2863 goto error;
2864 }
2865 trace_xfs_log_recover_inode_recover(log, in_f);
2866
2867 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2868 &xfs_inode_buf_ops);
2869 if (!bp) {
2870 error = -ENOMEM;
2871 goto error;
2872 }
2873 error = bp->b_error;
2874 if (error) {
2875 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2876 goto out_release;
2877 }
2878 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2879 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2880
2881 /*
2882 * Make sure the place we're flushing out to really looks
2883 * like an inode!
2884 */
2885 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2886 xfs_alert(mp,
2887 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2888 __func__, dip, bp, in_f->ilf_ino);
2889 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2890 XFS_ERRLEVEL_LOW, mp);
2891 error = -EFSCORRUPTED;
2892 goto out_release;
2893 }
2894 dicp = item->ri_buf[1].i_addr;
2895 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2896 xfs_alert(mp,
2897 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2898 __func__, item, in_f->ilf_ino);
2899 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2900 XFS_ERRLEVEL_LOW, mp);
2901 error = -EFSCORRUPTED;
2902 goto out_release;
2903 }
2904
2905 /*
2906 * If the inode has an LSN in it, recover the inode only if it's less
2907 * than the lsn of the transaction we are replaying. Note: we still
2908 * need to replay an owner change even though the inode is more recent
2909 * than the transaction as there is no guarantee that all the btree
2910 * blocks are more recent than this transaction, too.
2911 */
2912 if (dip->di_version >= 3) {
2913 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
2914
2915 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2916 trace_xfs_log_recover_inode_skip(log, in_f);
2917 error = 0;
2918 goto out_owner_change;
2919 }
2920 }
2921
2922 /*
2923 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2924 * are transactional and if ordering is necessary we can determine that
2925 * more accurately by the LSN field in the V3 inode core. Don't trust
2926 * the inode versions as we might be changing them here - use the
2927 * superblock flag to determine whether we need to look at di_flushiter
2928 * to skip replay when the on disk inode is newer than the log one.
2929 */
2930 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2931 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2932 /*
2933 * Deal with the wrap case: an on-disk value of DI_MAX_FLUSH
2934 * with a much smaller logged value means the counter wrapped.
2935 */
2936 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2937 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2938 /* do nothing */
2939 } else {
2940 trace_xfs_log_recover_inode_skip(log, in_f);
2941 error = 0;
2942 goto out_release;
2943 }
2944 }
2945
2946 /* Take the opportunity to reset the flush iteration count */
2947 dicp->di_flushiter = 0;
2948
2949 if (unlikely(S_ISREG(dicp->di_mode))) {
2950 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2951 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2952 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2953 XFS_ERRLEVEL_LOW, mp, dicp);
2954 xfs_alert(mp,
2955 "%s: Bad regular inode log record, rec ptr 0x%p, "
2956 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2957 __func__, item, dip, bp, in_f->ilf_ino);
2958 error = -EFSCORRUPTED;
2959 goto out_release;
2960 }
2961 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2962 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2963 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2964 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2965 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2966 XFS_ERRLEVEL_LOW, mp, dicp);
2967 xfs_alert(mp,
2968 "%s: Bad dir inode log record, rec ptr 0x%p, "
2969 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2970 __func__, item, dip, bp, in_f->ilf_ino);
2971 error = -EFSCORRUPTED;
2972 goto out_release;
2973 }
2974 }
2975 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2976 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2977 XFS_ERRLEVEL_LOW, mp, dicp);
2978 xfs_alert(mp,
2979 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2980 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2981 __func__, item, dip, bp, in_f->ilf_ino,
2982 dicp->di_nextents + dicp->di_anextents,
2983 dicp->di_nblocks);
2984 error = -EFSCORRUPTED;
2985 goto out_release;
2986 }
2987 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2988 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2989 XFS_ERRLEVEL_LOW, mp, dicp);
2990 xfs_alert(mp,
2991 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2992 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2993 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2994 error = -EFSCORRUPTED;
2995 goto out_release;
2996 }
2997 isize = xfs_icdinode_size(dicp->di_version);
2998 if (unlikely(item->ri_buf[1].i_len > isize)) {
2999 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3000 XFS_ERRLEVEL_LOW, mp, dicp);
3001 xfs_alert(mp,
3002 "%s: Bad inode log record length %d, rec ptr 0x%p",
3003 __func__, item->ri_buf[1].i_len, item);
3004 error = -EFSCORRUPTED;
3005 goto out_release;
3006 }
3007
3008 /* The core is in in-core format */
3009 xfs_dinode_to_disk(dip, dicp);
3010
3011 /* the rest is in on-disk format */
3012 if (item->ri_buf[1].i_len > isize) {
3013 memcpy((char *)dip + isize,
3014 item->ri_buf[1].i_addr + isize,
3015 item->ri_buf[1].i_len - isize);
3016 }
3017
3018 fields = in_f->ilf_fields;
3019 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
3020 case XFS_ILOG_DEV:
3021 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3022 break;
3023 case XFS_ILOG_UUID:
3024 memcpy(XFS_DFORK_DPTR(dip),
3025 &in_f->ilf_u.ilfu_uuid,
3026 sizeof(uuid_t));
3027 break;
3028 }
3029
3030 if (in_f->ilf_size == 2)
3031 goto out_owner_change;
3032 len = item->ri_buf[2].i_len;
3033 src = item->ri_buf[2].i_addr;
3034 ASSERT(in_f->ilf_size <= 4);
3035 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3036 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3037 (len == in_f->ilf_dsize));
3038
3039 switch (fields & XFS_ILOG_DFORK) {
3040 case XFS_ILOG_DDATA:
3041 case XFS_ILOG_DEXT:
3042 memcpy(XFS_DFORK_DPTR(dip), src, len);
3043 break;
3044
3045 case XFS_ILOG_DBROOT:
3046 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3047 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3048 XFS_DFORK_DSIZE(dip, mp));
3049 break;
3050
3051 default:
3052 /*
3053 * There are no data fork flags set.
3054 */
3055 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3056 break;
3057 }
3058
3059 /*
3060 * If we logged any attribute data, recover it. There may or
3061 * may not have been any other non-core data logged in this
3062 * transaction.
3063 */
3064 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3065 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3066 attr_index = 3;
3067 } else {
3068 attr_index = 2;
3069 }
3070 len = item->ri_buf[attr_index].i_len;
3071 src = item->ri_buf[attr_index].i_addr;
3072 ASSERT(len == in_f->ilf_asize);
3073
3074 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3075 case XFS_ILOG_ADATA:
3076 case XFS_ILOG_AEXT:
3077 dest = XFS_DFORK_APTR(dip);
3078 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3079 memcpy(dest, src, len);
3080 break;
3081
3082 case XFS_ILOG_ABROOT:
3083 dest = XFS_DFORK_APTR(dip);
3084 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3085 len, (xfs_bmdr_block_t*)dest,
3086 XFS_DFORK_ASIZE(dip, mp));
3087 break;
3088
3089 default:
3090 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3091 ASSERT(0);
3092 error = -EIO;
3093 goto out_release;
3094 }
3095 }
3096
3097 out_owner_change:
3098 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3099 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3100 buffer_list);
3101 /* re-generate the checksum. */
3102 xfs_dinode_calc_crc(log->l_mp, dip);
3103
3104 ASSERT(bp->b_target->bt_mount == mp);
3105 bp->b_iodone = xlog_recover_iodone;
3106 xfs_buf_delwri_queue(bp, buffer_list);
3107
3108 out_release:
3109 xfs_buf_relse(bp);
3110 error:
3111 if (need_free)
3112 kmem_free(in_f);
3113 return error;
3114 }
3115
3116 /*
3117 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3118 * structure, so that we know not to do any dquot item or dquot buffer
3119 * recovery of that type.
3120 */
3121 STATIC int
3122 xlog_recover_quotaoff_pass1(
3123 struct xlog *log,
3124 struct xlog_recover_item *item)
3125 {
3126 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3127 ASSERT(qoff_f);
3128
3129 /*
3130 * The logitem format's flag tells us if this was user quotaoff,
3131 * group/project quotaoff or both.
3132 */
3133 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3134 log->l_quotaoffs_flag |= XFS_DQ_USER;
3135 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3136 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3137 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3138 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3139
3140 return 0;
3141 }
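/*
 * Ordering note (added for clarity): quotaoff records must be noted in
 * pass 1, before any pass 2 replay runs, so that
 * xlog_recover_do_dquot_buffer() and xlog_recover_dquot_pass2() can
 * consult l_quotaoffs_flag and skip dquot changes of a type that was
 * being turned off when the filesystem went down.
 */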
3142
3143 /*
3144 * Recover a dquot record
3145 */
3146 STATIC int
3147 xlog_recover_dquot_pass2(
3148 struct xlog *log,
3149 struct list_head *buffer_list,
3150 struct xlog_recover_item *item,
3151 xfs_lsn_t current_lsn)
3152 {
3153 xfs_mount_t *mp = log->l_mp;
3154 xfs_buf_t *bp;
3155 struct xfs_disk_dquot *ddq, *recddq;
3156 int error;
3157 xfs_dq_logformat_t *dq_f;
3158 uint type;
3159
3160
3161 /*
3162 * Filesystems are required to send in quota flags at mount time.
3163 */
3164 if (mp->m_qflags == 0)
3165 return 0;
3166
3167 recddq = item->ri_buf[1].i_addr;
3168 if (recddq == NULL) {
3169 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3170 return -EIO;
3171 }
3172 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3173 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3174 item->ri_buf[1].i_len, __func__);
3175 return -EIO;
3176 }
3177
3178 /*
3179 * This type of quota was turned off, so ignore this record.
3180 */
3181 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3182 ASSERT(type);
3183 if (log->l_quotaoffs_flag & type)
3184 return 0;
3185
3186 /*
3187 * At this point we know that quota was _not_ turned off.
3188 * Since the mount flags are not indicating to us otherwise, this
3189 * must mean that quota is on, and the dquot needs to be replayed.
3190 * Remember that we may not have fully recovered the superblock yet,
3191 * so we can't do the usual trick of looking at the SB quota bits.
3192 *
3193 * The other possibility, of course, is that the quota subsystem was
3194 * removed since the last mount - ENOSYS.
3195 */
3196 dq_f = item->ri_buf[0].i_addr;
3197 ASSERT(dq_f);
3198 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3199 "xlog_recover_dquot_pass2 (log copy)");
3200 if (error)
3201 return -EIO;
3202 ASSERT(dq_f->qlf_len == 1);
3203
3204 /*
3205 * At this point we are assuming that the dquots have been allocated
3206 * and hence the buffer has valid dquots stamped in it. It should,
3207 * therefore, pass verifier validation. If the dquot is bad, then
3208 * we'll return an error here, so we don't need to specifically check
3209 * the dquot in the buffer after the verifier has run.
3210 */
3211 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3212 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3213 &xfs_dquot_buf_ops);
3214 if (error)
3215 return error;
3216
3217 ASSERT(bp);
3218 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3219
3220 /*
3221 * If the dquot has an LSN in it, recover the dquot only if it's less
3222 * than the lsn of the transaction we are replaying.
3223 */
3224 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3225 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3226 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3227
3228 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3229 goto out_release;
3230 }
3231 }
3232
3233 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3234 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3235 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3236 XFS_DQUOT_CRC_OFF);
3237 }
3238
3239 ASSERT(dq_f->qlf_size == 2);
3240 ASSERT(bp->b_target->bt_mount == mp);
3241 bp->b_iodone = xlog_recover_iodone;
3242 xfs_buf_delwri_queue(bp, buffer_list);
3243
3244 out_release:
3245 xfs_buf_relse(bp);
3246 return 0;
3247 }
3248
3249 /*
3250 * This routine is called to create an in-core extent free intent
3251 * item from the efi format structure which was logged on disk.
3252 * It allocates an in-core efi, copies the extents from the format
3253 * structure into it, and adds the efi to the AIL with the given
3254 * LSN.
3255 */
3256 STATIC int
3257 xlog_recover_efi_pass2(
3258 struct xlog *log,
3259 struct xlog_recover_item *item,
3260 xfs_lsn_t lsn)
3261 {
3262 int error;
3263 struct xfs_mount *mp = log->l_mp;
3264 struct xfs_efi_log_item *efip;
3265 struct xfs_efi_log_format *efi_formatp;
3266
3267 efi_formatp = item->ri_buf[0].i_addr;
3268
3269 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3270 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3271 if (error) {
3272 xfs_efi_item_free(efip);
3273 return error;
3274 }
3275 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3276
3277 spin_lock(&log->l_ailp->xa_lock);
3278 /*
3279 * The EFI has two references. One for the EFD and one for the EFI to ensure
3280 * it makes it into the AIL. Insert the EFI into the AIL directly and
3281 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3282 * AIL lock.
3283 */
3284 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3285 xfs_efi_release(efip);
3286 return 0;
3287 }
3288
3289
3290 /*
3291 * This routine is called when an EFD format structure is found in a committed
3292 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3293 * was still in the log. To do this it searches the AIL for the EFI with an id
3294 * equal to that in the EFD format structure. If we find it we drop the EFD
3295 * reference, which removes the EFI from the AIL and frees it.
3296 */
3297 STATIC int
3298 xlog_recover_efd_pass2(
3299 struct xlog *log,
3300 struct xlog_recover_item *item)
3301 {
3302 xfs_efd_log_format_t *efd_formatp;
3303 xfs_efi_log_item_t *efip = NULL;
3304 xfs_log_item_t *lip;
3305 __uint64_t efi_id;
3306 struct xfs_ail_cursor cur;
3307 struct xfs_ail *ailp = log->l_ailp;
3308
3309 efd_formatp = item->ri_buf[0].i_addr;
3310 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3311 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3312 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3313 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3314 efi_id = efd_formatp->efd_efi_id;
3315
3316 /*
3317 * Search for the EFI with the id in the EFD format structure in the
3318 * AIL.
3319 */
3320 spin_lock(&ailp->xa_lock);
3321 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3322 while (lip != NULL) {
3323 if (lip->li_type == XFS_LI_EFI) {
3324 efip = (xfs_efi_log_item_t *)lip;
3325 if (efip->efi_format.efi_id == efi_id) {
3326 /*
3327 * Drop the EFD reference to the EFI. This
3328 * removes the EFI from the AIL and frees it.
3329 */
3330 spin_unlock(&ailp->xa_lock);
3331 xfs_efi_release(efip);
3332 spin_lock(&ailp->xa_lock);
3333 break;
3334 }
3335 }
3336 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3337 }
3338
3339 xfs_trans_ail_cursor_done(&cur);
3340 spin_unlock(&ailp->xa_lock);
3341
3342 return 0;
3343 }
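/*
 * Lifecycle note (added for clarity): an EFI with no matching EFD in
 * the log means the crash happened between the intent and its done
 * record. Such EFIs remain in the AIL after the walk above, and the
 * pending extent frees are completed once the log recovery passes
 * have finished.
 */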
3344
3345 /*
3346 * This routine is called when an inode create format structure is found in a
3347 * committed transaction in the log. Its purpose is to initialise the inodes
3348 * being allocated on disk. This requires us to get inode cluster buffers that
3349 * match the range to be initialised, stamped with inode templates and written
3350 * by delayed write so that subsequent modifications will hit the cached buffer
3351 * and only need writing out at the end of recovery.
3352 */
3353 STATIC int
3354 xlog_recover_do_icreate_pass2(
3355 struct xlog *log,
3356 struct list_head *buffer_list,
3357 xlog_recover_item_t *item)
3358 {
3359 struct xfs_mount *mp = log->l_mp;
3360 struct xfs_icreate_log *icl;
3361 xfs_agnumber_t agno;
3362 xfs_agblock_t agbno;
3363 unsigned int count;
3364 unsigned int isize;
3365 xfs_agblock_t length;
3366 int blks_per_cluster;
3367 int bb_per_cluster;
3368 int cancel_count;
3369 int nbufs;
3370 int i;
3371
3372 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3373 if (icl->icl_type != XFS_LI_ICREATE) {
3374 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3375 return -EINVAL;
3376 }
3377
3378 if (icl->icl_size != 1) {
3379 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3380 return -EINVAL;
3381 }
3382
3383 agno = be32_to_cpu(icl->icl_ag);
3384 if (agno >= mp->m_sb.sb_agcount) {
3385 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3386 return -EINVAL;
3387 }
3388 agbno = be32_to_cpu(icl->icl_agbno);
3389 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3390 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3391 return -EINVAL;
3392 }
3393 isize = be32_to_cpu(icl->icl_isize);
3394 if (isize != mp->m_sb.sb_inodesize) {
3395 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3396 return -EINVAL;
3397 }
3398 count = be32_to_cpu(icl->icl_count);
3399 if (!count) {
3400 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3401 return -EINVAL;
3402 }
3403 length = be32_to_cpu(icl->icl_length);
3404 if (!length || length >= mp->m_sb.sb_agblocks) {
3405 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3406 return -EINVAL;
3407 }
3408
3409 /*
3410 * The inode chunk is either full or sparse and we only support
3411 * m_ialloc_min_blks sized sparse allocations at this time.
3412 */
3413 if (length != mp->m_ialloc_blks &&
3414 length != mp->m_ialloc_min_blks) {
3415 xfs_warn(log->l_mp,
3416 "%s: unsupported chunk length", __FUNCTION__);
3417 return -EINVAL;
3418 }
3419
3420 /* verify inode count is consistent with extent length */
3421 if ((count >> mp->m_sb.sb_inopblog) != length) {
3422 xfs_warn(log->l_mp,
3423 "%s: inconsistent inode count and chunk length",
3424 __func__);
3425 return -EINVAL;
3426 }
3427
3428 /*
3429 * The icreate transaction can cover multiple cluster buffers and these
3430 * buffers could have been freed and reused. Check the individual
3431 * buffers for cancellation so we don't overwrite anything written after
3432 * a cancellation.
3433 */
3434 blks_per_cluster = xfs_icluster_size_fsb(mp);
3435 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3436 nbufs = length / blks_per_cluster;
3437 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3438 xfs_daddr_t daddr;
3439
3440 daddr = XFS_AGB_TO_DADDR(mp, agno,
3441 agbno + i * blks_per_cluster);
3442 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3443 cancel_count++;
3444 }
3445
3446 /*
3447 * We currently only use icreate for a single allocation at a time. This
3448 * means we should expect either all or none of the buffers to be
3449 * cancelled. Be conservative and skip replay if at least one buffer is
3450 * cancelled, but warn the user that something is awry if the buffers
3451 * are not consistent.
3452 *
3453 * XXX: This must be refined to only skip cancelled clusters once we use
3454 * icreate for multiple chunk allocations.
3455 */
3456 ASSERT(!cancel_count || cancel_count == nbufs);
3457 if (cancel_count) {
3458 if (cancel_count != nbufs)
3459 xfs_warn(mp,
3460 "WARNING: partial inode chunk cancellation, skipped icreate.");
3461 trace_xfs_log_recover_icreate_cancel(log, icl);
3462 return 0;
3463 }
3464
3465 trace_xfs_log_recover_icreate_recover(log, icl);
3466 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3467 length, be32_to_cpu(icl->icl_gen));
3468 }
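/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): the inode count/chunk length consistency check used above.
 * With a hypothetical sb_inopblog of 4 (16 inodes per block), an
 * 8 block chunk must log a count of 128.
 */
static inline bool
xlog_icreate_counts_consistent_sketch(
	unsigned int	count,
	xfs_agblock_t	length,
	int		inopblog)	/* mp->m_sb.sb_inopblog */
{
	return (count >> inopblog) == length;
}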
3469
3470 STATIC void
3471 xlog_recover_buffer_ra_pass2(
3472 struct xlog *log,
3473 struct xlog_recover_item *item)
3474 {
3475 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3476 struct xfs_mount *mp = log->l_mp;
3477
3478 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3479 buf_f->blf_len, buf_f->blf_flags)) {
3480 return;
3481 }
3482
3483 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3484 buf_f->blf_len, NULL);
3485 }
3486
3487 STATIC void
3488 xlog_recover_inode_ra_pass2(
3489 struct xlog *log,
3490 struct xlog_recover_item *item)
3491 {
3492 struct xfs_inode_log_format ilf_buf;
3493 struct xfs_inode_log_format *ilfp;
3494 struct xfs_mount *mp = log->l_mp;
3495 int error;
3496
3497 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3498 ilfp = item->ri_buf[0].i_addr;
3499 } else {
3500 ilfp = &ilf_buf;
3501 memset(ilfp, 0, sizeof(*ilfp));
3502 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3503 if (error)
3504 return;
3505 }
3506
3507 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3508 return;
3509
3510 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3511 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3512 }
3513
3514 STATIC void
3515 xlog_recover_dquot_ra_pass2(
3516 struct xlog *log,
3517 struct xlog_recover_item *item)
3518 {
3519 struct xfs_mount *mp = log->l_mp;
3520 struct xfs_disk_dquot *recddq;
3521 struct xfs_dq_logformat *dq_f;
3522 uint type;
3523 int len;
3524
3525
3526 if (mp->m_qflags == 0)
3527 return;
3528
3529 recddq = item->ri_buf[1].i_addr;
3530 if (recddq == NULL)
3531 return;
3532 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3533 return;
3534
3535 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3536 ASSERT(type);
3537 if (log->l_quotaoffs_flag & type)
3538 return;
3539
3540 dq_f = item->ri_buf[0].i_addr;
3541 ASSERT(dq_f);
3542 ASSERT(dq_f->qlf_len == 1);
3543
3544 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3545 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3546 return;
3547
3548 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3549 &xfs_dquot_buf_ra_ops);
3550 }
3551
3552 STATIC void
3553 xlog_recover_ra_pass2(
3554 struct xlog *log,
3555 struct xlog_recover_item *item)
3556 {
3557 switch (ITEM_TYPE(item)) {
3558 case XFS_LI_BUF:
3559 xlog_recover_buffer_ra_pass2(log, item);
3560 break;
3561 case XFS_LI_INODE:
3562 xlog_recover_inode_ra_pass2(log, item);
3563 break;
3564 case XFS_LI_DQUOT:
3565 xlog_recover_dquot_ra_pass2(log, item);
3566 break;
3567 case XFS_LI_EFI:
3568 case XFS_LI_EFD:
3569 case XFS_LI_QUOTAOFF:
3570 default:
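/* EFIs, EFDs and quotaoff items reference no buffers to prefetch */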
3571 break;
3572 }
3573 }
3574
3575 STATIC int
3576 xlog_recover_commit_pass1(
3577 struct xlog *log,
3578 struct xlog_recover *trans,
3579 struct xlog_recover_item *item)
3580 {
3581 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3582
3583 switch (ITEM_TYPE(item)) {
3584 case XFS_LI_BUF:
3585 return xlog_recover_buffer_pass1(log, item);
3586 case XFS_LI_QUOTAOFF:
3587 return xlog_recover_quotaoff_pass1(log, item);
3588 case XFS_LI_INODE:
3589 case XFS_LI_EFI:
3590 case XFS_LI_EFD:
3591 case XFS_LI_DQUOT:
3592 case XFS_LI_ICREATE:
3593 /* nothing to do in pass 1 */
3594 return 0;
3595 default:
3596 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3597 __func__, ITEM_TYPE(item));
3598 ASSERT(0);
3599 return -EIO;
3600 }
3601 }
3602
3603 STATIC int
3604 xlog_recover_commit_pass2(
3605 struct xlog *log,
3606 struct xlog_recover *trans,
3607 struct list_head *buffer_list,
3608 struct xlog_recover_item *item)
3609 {
3610 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3611
3612 switch (ITEM_TYPE(item)) {
3613 case XFS_LI_BUF:
3614 return xlog_recover_buffer_pass2(log, buffer_list, item,
3615 trans->r_lsn);
3616 case XFS_LI_INODE:
3617 return xlog_recover_inode_pass2(log, buffer_list, item,
3618 trans->r_lsn);
3619 case XFS_LI_EFI:
3620 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3621 case XFS_LI_EFD:
3622 return xlog_recover_efd_pass2(log, item);
3623 case XFS_LI_DQUOT:
3624 return xlog_recover_dquot_pass2(log, buffer_list, item,
3625 trans->r_lsn);
3626 case XFS_LI_ICREATE:
3627 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3628 case XFS_LI_QUOTAOFF:
3629 /* nothing to do in pass2 */
3630 return 0;
3631 default:
3632 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3633 __func__, ITEM_TYPE(item));
3634 ASSERT(0);
3635 return -EIO;
3636 }
3637 }
3638
3639 STATIC int
3640 xlog_recover_items_pass2(
3641 struct xlog *log,
3642 struct xlog_recover *trans,
3643 struct list_head *buffer_list,
3644 struct list_head *item_list)
3645 {
3646 struct xlog_recover_item *item;
3647 int error = 0;
3648
3649 list_for_each_entry(item, item_list, ri_list) {
3650 error = xlog_recover_commit_pass2(log, trans,
3651 buffer_list, item);
3652 if (error)
3653 return error;
3654 }
3655
3656 return error;
3657 }
3658
3659 /*
3660 * Perform the transaction.
3661 *
3662 * If the transaction modifies a buffer or inode, do it now. Otherwise,
3663 * EFIs and EFDs get queued up by adding entries into the AIL for them.
3664 */
3665 STATIC int
3666 xlog_recover_commit_trans(
3667 struct xlog *log,
3668 struct xlog_recover *trans,
3669 int pass)
3670 {
3671 int error = 0;
3672 int error2;
3673 int items_queued = 0;
3674 struct xlog_recover_item *item;
3675 struct xlog_recover_item *next;
3676 LIST_HEAD(buffer_list);
3677 LIST_HEAD(ra_list);
3678 LIST_HEAD(done_list);
3679
3680 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3681
3682 hlist_del(&trans->r_list);
3683
3684 error = xlog_recover_reorder_trans(log, trans, pass);
3685 if (error)
3686 return error;
3687
3688 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3689 switch (pass) {
3690 case XLOG_RECOVER_PASS1:
3691 error = xlog_recover_commit_pass1(log, trans, item);
3692 break;
3693 case XLOG_RECOVER_PASS2:
3694 xlog_recover_ra_pass2(log, item);
3695 list_move_tail(&item->ri_list, &ra_list);
3696 items_queued++;
3697 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3698 error = xlog_recover_items_pass2(log, trans,
3699 &buffer_list, &ra_list);
3700 list_splice_tail_init(&ra_list, &done_list);
3701 items_queued = 0;
3702 }
3703
3704 break;
3705 default:
3706 ASSERT(0);
3707 }
3708
3709 if (error)
3710 goto out;
3711 }
3712
3713 out:
3714 if (!list_empty(&ra_list)) {
3715 if (!error)
3716 error = xlog_recover_items_pass2(log, trans,
3717 &buffer_list, &ra_list);
3718 list_splice_tail_init(&ra_list, &done_list);
3719 }
3720
3721 if (!list_empty(&done_list))
3722 list_splice_init(&done_list, &trans->r_itemq);
3723
3724 error2 = xfs_buf_delwri_submit(&buffer_list);
3725 return error ? error : error2;
3726 }
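
/*
 * Editorial note on the pass 2 loop above: recovery is pipelined. Each
 * item first has readahead issued for the buffers it touches, then is
 * parked on ra_list; once XLOG_RECOVER_COMMIT_QUEUE_MAX items are queued,
 * the whole batch is replayed via xlog_recover_items_pass2() while the
 * prefetched buffers are, with luck, already cached. Dirty buffers
 * accumulate on buffer_list and are written back once at the end by
 * xfs_buf_delwri_submit().
 */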
3727
3728 STATIC void
3729 xlog_recover_add_item(
3730 struct list_head *head)
3731 {
3732 xlog_recover_item_t *item;
3733
3734 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
3735 INIT_LIST_HEAD(&item->ri_list);
3736 list_add_tail(&item->ri_list, head);
3737 }
3738
3739 STATIC int
3740 xlog_recover_add_to_cont_trans(
3741 struct xlog *log,
3742 struct xlog_recover *trans,
3743 char *dp,
3744 int len)
3745 {
3746 xlog_recover_item_t *item;
3747 char *ptr, *old_ptr;
3748 int old_len;
3749
3750 /*
3751 * If the transaction is empty, the header was split across this and the
3752 * previous record. Copy the rest of the header.
3753 */
3754 if (list_empty(&trans->r_itemq)) {
3755 ASSERT(len <= sizeof(struct xfs_trans_header));
3756 if (len > sizeof(struct xfs_trans_header)) {
3757 xfs_warn(log->l_mp, "%s: bad header length", __func__);
3758 return -EIO;
3759 }
3760
3761 xlog_recover_add_item(&trans->r_itemq);
3762 ptr = (char *)&trans->r_theader +
3763 sizeof(struct xfs_trans_header) - len;
3764 memcpy(ptr, dp, len);
3765 return 0;
3766 }
3767
3768 /* take the tail entry */
3769 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
3770
3771 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
3772 old_len = item->ri_buf[item->ri_cnt-1].i_len;
3773
3774 ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
3775 memcpy(&ptr[old_len], dp, len);
3776 item->ri_buf[item->ri_cnt-1].i_len += len;
3777 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
3778 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
3779 return 0;
3780 }
3781
3782 /*
3783 * The next region to add is the start of a new region. It could be
3784 * a whole region or it could be the first part of a new region. Because
3785 * of this, the assumption here is that the type and size fields of all
3786 * format structures fit into the first 32 bits of the structure.
3787 *
3788 * This works because all regions must be 32 bit aligned. Therefore, we
3789 * either have both fields or neither. If we have neither field, the data
3790 * part of the region is zero length; we only have
3791 * a log_op_header and can throw away the header since a new one will appear
3792 * later. If we have at least 4 bytes, then we can determine how many regions
3793 * will appear in the current log item.
3794 */
3795 STATIC int
3796 xlog_recover_add_to_trans(
3797 struct xlog *log,
3798 struct xlog_recover *trans,
3799 char *dp,
3800 int len)
3801 {
3802 xfs_inode_log_format_t *in_f; /* any will do */
3803 xlog_recover_item_t *item;
3804 char *ptr;
3805
3806 if (!len)
3807 return 0;
3808 if (list_empty(&trans->r_itemq)) {
3809 /* we need to catch log corruptions here */
3810 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
3811 xfs_warn(log->l_mp, "%s: bad header magic number",
3812 __func__);
3813 ASSERT(0);
3814 return -EIO;
3815 }
3816
3817 if (len > sizeof(struct xfs_trans_header)) {
3818 xfs_warn(log->l_mp, "%s: bad header length", __func__);
3819 ASSERT(0);
3820 return -EIO;
3821 }
3822
3823 /*
3824 * The transaction header can be arbitrarily split across op
3825 * records. If we don't have the whole thing here, copy what we
3826 * do have and handle the rest in the next record.
3827 */
3828 if (len == sizeof(struct xfs_trans_header))
3829 xlog_recover_add_item(&trans->r_itemq);
3830 memcpy(&trans->r_theader, dp, len);
3831 return 0;
3832 }
3833
3834 ptr = kmem_alloc(len, KM_SLEEP);
3835 memcpy(ptr, dp, len);
3836 in_f = (xfs_inode_log_format_t *)ptr;
3837
3838 /* take the tail entry */
3839 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
3840 if (item->ri_total != 0 &&
3841 item->ri_total == item->ri_cnt) {
3842 /* tail item is in use, get a new one */
3843 xlog_recover_add_item(&trans->r_itemq);
3844 item = list_entry(trans->r_itemq.prev,
3845 xlog_recover_item_t, ri_list);
3846 }
3847
3848 if (item->ri_total == 0) { /* first region to be added */
3849 if (in_f->ilf_size == 0 ||
3850 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
3851 xfs_warn(log->l_mp,
3852 "bad number of regions (%d) in inode log format",
3853 in_f->ilf_size);
3854 ASSERT(0);
3855 kmem_free(ptr);
3856 return -EIO;
3857 }
3858
3859 item->ri_total = in_f->ilf_size;
3860 item->ri_buf =
3861 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
3862 KM_SLEEP);
3863 }
3864 ASSERT(item->ri_total > item->ri_cnt);
3865 /* Description region is ri_buf[0] */
3866 item->ri_buf[item->ri_cnt].i_addr = ptr;
3867 item->ri_buf[item->ri_cnt].i_len = len;
3868 item->ri_cnt++;
3869 trace_xfs_log_recover_item_add(log, trans, item, 0);
3870 return 0;
3871 }
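
/*
 * Editorial sketch (illustrative only): the "first 32 bits" assumption
 * documented above amounts to every log item format sharing a common
 * prefix, conceptually:
 *
 *	struct log_item_prefix {		(hypothetical name)
 *		__uint16_t	type;		(e.g. ilf_type, blf_type)
 *		__uint16_t	size;		(region count, e.g. ilf_size)
 *	};
 *
 * which is why the code above can cast the first region to
 * xfs_inode_log_format_t and trust ilf_size whatever the item type is.
 */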
3872
3873 /*
3874 * Free up any resources allocated by the transaction.
3875 *
3876 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3877 */
3878 STATIC void
3879 xlog_recover_free_trans(
3880 struct xlog_recover *trans)
3881 {
3882 xlog_recover_item_t *item, *n;
3883 int i;
3884
3885 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3886 /* Free the regions in the item. */
3887 list_del(&item->ri_list);
3888 for (i = 0; i < item->ri_cnt; i++)
3889 kmem_free(item->ri_buf[i].i_addr);
3890 /* Free the item itself */
3891 kmem_free(item->ri_buf);
3892 kmem_free(item);
3893 }
3894 /* Free the transaction recover structure */
3895 kmem_free(trans);
3896 }
3897
3898 /*
3899 * On error or completion, trans is freed.
3900 */
3901 STATIC int
3902 xlog_recovery_process_trans(
3903 struct xlog *log,
3904 struct xlog_recover *trans,
3905 char *dp,
3906 unsigned int len,
3907 unsigned int flags,
3908 int pass)
3909 {
3910 int error = 0;
3911 bool freeit = false;
3912
3913 /* mask off ophdr transaction container flags */
3914 flags &= ~XLOG_END_TRANS;
3915 if (flags & XLOG_WAS_CONT_TRANS)
3916 flags &= ~XLOG_CONTINUE_TRANS;
3917
3918 /*
3919 * Callees must not free the trans structure. We'll decide whether to
3920 * free it based on the operation being done and its result.
3921 */
3922 switch (flags) {
3923 /* expected flag values */
3924 case 0:
3925 case XLOG_CONTINUE_TRANS:
3926 error = xlog_recover_add_to_trans(log, trans, dp, len);
3927 break;
3928 case XLOG_WAS_CONT_TRANS:
3929 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
3930 break;
3931 case XLOG_COMMIT_TRANS:
3932 error = xlog_recover_commit_trans(log, trans, pass);
3933 /* success or fail, we are now done with this transaction. */
3934 freeit = true;
3935 break;
3936
3937 /* unexpected flag values */
3938 case XLOG_UNMOUNT_TRANS:
3939 /* just skip trans */
3940 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3941 freeit = true;
3942 break;
3943 case XLOG_START_TRANS:
3944 default:
3945 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
3946 ASSERT(0);
3947 error = -EIO;
3948 break;
3949 }
3950 if (error || freeit)
3951 xlog_recover_free_trans(trans);
3952 return error;
3953 }
3954
3955 /*
3956 * Lookup the transaction recovery structure associated with the ID in the
3957 * current ophdr. If the transaction doesn't exist and the start flag is set in
3958 * the ophdr, then allocate a new transaction for future ID matches to find.
3959 * Either way, return what we found during the lookup - an existing transaction
3960 * or nothing.
3961 */
3962 STATIC struct xlog_recover *
3963 xlog_recover_ophdr_to_trans(
3964 struct hlist_head rhash[],
3965 struct xlog_rec_header *rhead,
3966 struct xlog_op_header *ohead)
3967 {
3968 struct xlog_recover *trans;
3969 xlog_tid_t tid;
3970 struct hlist_head *rhp;
3971
3972 tid = be32_to_cpu(ohead->oh_tid);
3973 rhp = &rhash[XLOG_RHASH(tid)];
3974 hlist_for_each_entry(trans, rhp, r_list) {
3975 if (trans->r_log_tid == tid)
3976 return trans;
3977 }
3978
3979 /*
3980 * skip over non-start transaction headers - we could be
3981 * processing slack space before the next transaction starts
3982 */
3983 if (!(ohead->oh_flags & XLOG_START_TRANS))
3984 return NULL;
3985
3986 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
3987
3988 /*
3989 * This is a new transaction so allocate a new recovery container to
3990 * hold the recovery ops that will follow.
3991 */
3992 trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
3993 trans->r_log_tid = tid;
3994 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
3995 INIT_LIST_HEAD(&trans->r_itemq);
3996 INIT_HLIST_NODE(&trans->r_list);
3997 hlist_add_head(&trans->r_list, rhp);
3998
3999 /*
4000 * Nothing more to do for this ophdr. Items to be added to this new
4001 * transaction will be in subsequent ophdr containers.
4002 */
4003 return NULL;
4004 }
4005
4006 STATIC int
4007 xlog_recover_process_ophdr(
4008 struct xlog *log,
4009 struct hlist_head rhash[],
4010 struct xlog_rec_header *rhead,
4011 struct xlog_op_header *ohead,
4012 char *dp,
4013 char *end,
4014 int pass)
4015 {
4016 struct xlog_recover *trans;
4017 unsigned int len;
4018
4019 /* Do we understand who wrote this op? */
4020 if (ohead->oh_clientid != XFS_TRANSACTION &&
4021 ohead->oh_clientid != XFS_LOG) {
4022 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4023 __func__, ohead->oh_clientid);
4024 ASSERT(0);
4025 return -EIO;
4026 }
4027
4028 /*
4029 * Check that the ophdr contains all the data it is supposed to contain.
4030 */
4031 len = be32_to_cpu(ohead->oh_len);
4032 if (dp + len > end) {
4033 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4034 WARN_ON(1);
4035 return -EIO;
4036 }
4037
4038 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4039 if (!trans) {
4040 /* nothing to do, so skip over this ophdr */
4041 return 0;
4042 }
4043
4044 return xlog_recovery_process_trans(log, trans, dp, len,
4045 ohead->oh_flags, pass);
4046 }
4047
4048 /*
4049 * There are two valid states of the r_state field. 0 indicates that the
4050 * transaction structure is in a normal state. We have either seen the
4051 * start of the transaction or the last operation we added was not a partial
4052 * operation. If the last operation we added to the transaction was a
4053 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4054 *
4055 * NOTE: skip LRs with 0 data length.
4056 */
4057 STATIC int
4058 xlog_recover_process_data(
4059 struct xlog *log,
4060 struct hlist_head rhash[],
4061 struct xlog_rec_header *rhead,
4062 char *dp,
4063 int pass)
4064 {
4065 struct xlog_op_header *ohead;
4066 char *end;
4067 int num_logops;
4068 int error;
4069
4070 end = dp + be32_to_cpu(rhead->h_len);
4071 num_logops = be32_to_cpu(rhead->h_num_logops);
4072
4073 /* check the log format matches our own - else we can't recover */
4074 if (xlog_header_check_recover(log->l_mp, rhead))
4075 return -EIO;
4076
4077 while ((dp < end) && num_logops) {
4078
4079 ohead = (struct xlog_op_header *)dp;
4080 dp += sizeof(*ohead);
4081 ASSERT(dp <= end);
4082
4083 /* errors will abort recovery */
4084 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4085 dp, end, pass);
4086 if (error)
4087 return error;
4088
4089 dp += be32_to_cpu(ohead->oh_len);
4090 num_logops--;
4091 }
4092 return 0;
4093 }
4094
4095 /*
4096 * Process an extent free intent item that was recovered from
4097 * the log. We need to free the extents that it describes.
4098 */
4099 STATIC int
4100 xlog_recover_process_efi(
4101 xfs_mount_t *mp,
4102 xfs_efi_log_item_t *efip)
4103 {
4104 xfs_efd_log_item_t *efdp;
4105 xfs_trans_t *tp;
4106 int i;
4107 int error = 0;
4108 xfs_extent_t *extp;
4109 xfs_fsblock_t startblock_fsb;
4110
4111 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
4112
4113 /*
4114 * First check the validity of the extents described by the
4115 * EFI. If any are bad, then assume that all are bad and
4116 * just toss the EFI.
4117 */
4118 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
4119 extp = &(efip->efi_format.efi_extents[i]);
4120 startblock_fsb = XFS_BB_TO_FSB(mp,
4121 XFS_FSB_TO_DADDR(mp, extp->ext_start));
4122 if ((startblock_fsb == 0) ||
4123 (extp->ext_len == 0) ||
4124 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
4125 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
4126 /*
4127 * This will pull the EFI from the AIL and
4128 * free the memory associated with it.
4129 */
4130 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
4131 xfs_efi_release(efip);
4132 return -EIO;
4133 }
4134 }
4135
4136 tp = xfs_trans_alloc(mp, 0);
4137 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
4138 if (error)
4139 goto abort_error;
4140 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
4141
4142 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
4143 extp = &(efip->efi_format.efi_extents[i]);
4144 error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
4145 extp->ext_len);
4146 if (error)
4147 goto abort_error;
4148
4149 }
4150
4151 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
4152 error = xfs_trans_commit(tp);
4153 return error;
4154
4155 abort_error:
4156 xfs_trans_cancel(tp);
4157 return error;
4158 }
4159
4160 /*
4161 * When this is called, all of the EFIs which did not have
4162 * corresponding EFDs should be in the AIL. What we do now
4163 * is free the extents associated with each one.
4164 *
4165 * Since we process the EFIs in normal transactions, they
4166 * will be removed at some point after the commit. This prevents
4167 * us from just walking down the list processing each one.
4168 * We'll use a flag in the EFI to skip those that we've already
4169 * processed and use the AIL iteration mechanism's generation
4170 * count to try to speed this up at least a bit.
4171 *
4172 * When we start, we know that the EFIs are the only things in
4173 * the AIL. As we process them, however, other items are added
4174 * to the AIL. Since everything added to the AIL must come after
4175 * everything already in the AIL, we stop processing as soon as
4176 * we see something other than an EFI in the AIL.
4177 */
4178 STATIC int
4179 xlog_recover_process_efis(
4180 struct xlog *log)
4181 {
4182 struct xfs_log_item *lip;
4183 struct xfs_efi_log_item *efip;
4184 int error = 0;
4185 struct xfs_ail_cursor cur;
4186 struct xfs_ail *ailp;
4187
4188 ailp = log->l_ailp;
4189 spin_lock(&ailp->xa_lock);
4190 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4191 while (lip != NULL) {
4192 /*
4193 * We're done when we see something other than an EFI.
4194 * There should be no EFIs left in the AIL now.
4195 */
4196 if (lip->li_type != XFS_LI_EFI) {
4197 #ifdef DEBUG
4198 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4199 ASSERT(lip->li_type != XFS_LI_EFI);
4200 #endif
4201 break;
4202 }
4203
4204 /*
4205 * Skip EFIs that we've already processed.
4206 */
4207 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4208 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
4209 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4210 continue;
4211 }
4212
4213 spin_unlock(&ailp->xa_lock);
4214 error = xlog_recover_process_efi(log->l_mp, efip);
4215 spin_lock(&ailp->xa_lock);
4216 if (error)
4217 goto out;
4218 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4219 }
4220 out:
4221 xfs_trans_ail_cursor_done(&cur);
4222 spin_unlock(&ailp->xa_lock);
4223 return error;
4224 }
4225
4226 /*
4227 * A cancel occurs when the mount has failed and we're bailing out. Release all
4228 * pending EFIs so they don't pin the AIL.
4229 */
4230 STATIC int
4231 xlog_recover_cancel_efis(
4232 struct xlog *log)
4233 {
4234 struct xfs_log_item *lip;
4235 struct xfs_efi_log_item *efip;
4236 int error = 0;
4237 struct xfs_ail_cursor cur;
4238 struct xfs_ail *ailp;
4239
4240 ailp = log->l_ailp;
4241 spin_lock(&ailp->xa_lock);
4242 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4243 while (lip != NULL) {
4244 /*
4245 * We're done when we see something other than an EFI.
4246 * There should be no EFIs left in the AIL now.
4247 */
4248 if (lip->li_type != XFS_LI_EFI) {
4249 #ifdef DEBUG
4250 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4251 ASSERT(lip->li_type != XFS_LI_EFI);
4252 #endif
4253 break;
4254 }
4255
4256 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4257
4258 spin_unlock(&ailp->xa_lock);
4259 xfs_efi_release(efip);
4260 spin_lock(&ailp->xa_lock);
4261
4262 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4263 }
4264
4265 xfs_trans_ail_cursor_done(&cur);
4266 spin_unlock(&ailp->xa_lock);
4267 return error;
4268 }
4269
4270 /*
4271 * This routine performs a transaction to null out a bad inode pointer
4272 * in an agi unlinked inode hash bucket.
4273 */
4274 STATIC void
4275 xlog_recover_clear_agi_bucket(
4276 xfs_mount_t *mp,
4277 xfs_agnumber_t agno,
4278 int bucket)
4279 {
4280 xfs_trans_t *tp;
4281 xfs_agi_t *agi;
4282 xfs_buf_t *agibp;
4283 int offset;
4284 int error;
4285
4286 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
4287 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
4288 if (error)
4289 goto out_abort;
4290
4291 error = xfs_read_agi(mp, tp, agno, &agibp);
4292 if (error)
4293 goto out_abort;
4294
4295 agi = XFS_BUF_TO_AGI(agibp);
4296 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4297 offset = offsetof(xfs_agi_t, agi_unlinked) +
4298 (sizeof(xfs_agino_t) * bucket);
4299 xfs_trans_log_buf(tp, agibp, offset,
4300 (offset + sizeof(xfs_agino_t) - 1));
4301
4302 error = xfs_trans_commit(tp);
4303 if (error)
4304 goto out_error;
4305 return;
4306
4307 out_abort:
4308 xfs_trans_cancel(tp);
4309 out_error:
4310 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4311 return;
4312 }
4313
4314 STATIC xfs_agino_t
4315 xlog_recover_process_one_iunlink(
4316 struct xfs_mount *mp,
4317 xfs_agnumber_t agno,
4318 xfs_agino_t agino,
4319 int bucket)
4320 {
4321 struct xfs_buf *ibp;
4322 struct xfs_dinode *dip;
4323 struct xfs_inode *ip;
4324 xfs_ino_t ino;
4325 int error;
4326
4327 ino = XFS_AGINO_TO_INO(mp, agno, agino);
4328 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4329 if (error)
4330 goto fail;
4331
4332 /*
4333 * Get the on disk inode to find the next inode in the bucket.
4334 */
4335 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4336 if (error)
4337 goto fail_iput;
4338
4339 ASSERT(ip->i_d.di_nlink == 0);
4340 ASSERT(ip->i_d.di_mode != 0);
4341
4342 /* setup for the next pass */
4343 agino = be32_to_cpu(dip->di_next_unlinked);
4344 xfs_buf_relse(ibp);
4345
4346 /*
4347 * Prevent any DMAPI event from being sent when the reference on
4348 * the inode is dropped.
4349 */
4350 ip->i_d.di_dmevmask = 0;
4351
4352 IRELE(ip);
4353 return agino;
4354
4355 fail_iput:
4356 IRELE(ip);
4357 fail:
4358 /*
4359 * We can't read in the inode this bucket points to, or this inode
4360 * is messed up. Just ditch this bucket of inodes. We will lose
4361 * some inodes and space, but at least we won't hang.
4362 *
4363 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
4364 * clear the inode pointer in the bucket.
4365 */
4366 xlog_recover_clear_agi_bucket(mp, agno, bucket);
4367 return NULLAGINO;
4368 }
4369
4370 /*
4371 * xlog_iunlink_recover
4372 *
4373 * This is called during recovery to process any inodes which
4374 * we unlinked but not freed when the system crashed. These
4375 * inodes will be on the lists in the AGI blocks. What we do
4376 * here is scan all the AGIs and fully truncate and free any
4377 * inodes found on the lists. Each inode is removed from the
4378 * lists when it has been fully truncated and is freed. The
4379 * freeing of the inode and its removal from the list must be
4380 * atomic.
4381 */
4382 STATIC void
4383 xlog_recover_process_iunlinks(
4384 struct xlog *log)
4385 {
4386 xfs_mount_t *mp;
4387 xfs_agnumber_t agno;
4388 xfs_agi_t *agi;
4389 xfs_buf_t *agibp;
4390 xfs_agino_t agino;
4391 int bucket;
4392 int error;
4393 uint mp_dmevmask;
4394
4395 mp = log->l_mp;
4396
4397 /*
4398 * Prevent any DMAPI event from being sent while in this function.
4399 */
4400 mp_dmevmask = mp->m_dmevmask;
4401 mp->m_dmevmask = 0;
4402
4403 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4404 /*
4405 * Find the agi for this ag.
4406 */
4407 error = xfs_read_agi(mp, NULL, agno, &agibp);
4408 if (error) {
4409 /*
4410 * AGI is b0rked. Don't process it.
4411 *
4412 * We should probably mark the filesystem as corrupt
4413 * after we've recovered all the AGs we can.
4414 */
4415 continue;
4416 }
4417 /*
4418 * Unlock the buffer so that it can be acquired in the normal
4419 * course of the transaction to truncate and free each inode.
4420 * Because we are not racing with anyone else here for the AGI
4421 * buffer, we don't even need to hold it locked to read the
4422 * initial unlinked bucket entries out of the buffer. We keep a
4423 * buffer reference, though, so that it stays pinned in memory
4424 * while we need it.
4425 */
4426 agi = XFS_BUF_TO_AGI(agibp);
4427 xfs_buf_unlock(agibp);
4428
4429 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
4430 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
4431 while (agino != NULLAGINO) {
4432 agino = xlog_recover_process_one_iunlink(mp,
4433 agno, agino, bucket);
4434 }
4435 }
4436 xfs_buf_rele(agibp);
4437 }
4438
4439 mp->m_dmevmask = mp_dmevmask;
4440 }
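
/*
 * Editorial note: each AGI carries XFS_AGI_UNLINKED_BUCKETS chains of
 * unlinked-but-not-yet-freed inodes. The bucket head lives in
 * agi_unlinked[bucket]; each on-disk inode's di_next_unlinked field is
 * the forward link that xlog_recover_process_one_iunlink() follows, and
 * NULLAGINO terminates the chain (or a bad link forces the whole bucket
 * to be cleared).
 */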
4441
4442 STATIC int
4443 xlog_unpack_data(
4444 struct xlog_rec_header *rhead,
4445 char *dp,
4446 struct xlog *log)
4447 {
4448 int i, j, k;
4449
4450 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4451 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4452 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4453 dp += BBSIZE;
4454 }
4455
4456 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4457 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4458 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4459 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4460 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4461 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4462 dp += BBSIZE;
4463 }
4464 }
4465
4466 return 0;
4467 }
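
/*
 * Editorial sketch (illustrative only): where the saved cycle word for
 * data block i lives, assuming BBSIZE = 512 and XLOG_HEADER_CYCLE_SIZE =
 * 32768 so that each header covers 32768 / 512 = 64 blocks:
 *
 *	i < 64:   rhead->h_cycle_data[i]
 *	i >= 64:  xhdr[i / 64].hic_xheader.xh_cycle_data[i % 64]   (v2 logs)
 *
 * which is exactly the j/k arithmetic in the loop above.
 */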
4468
4469 /*
4470 * CRC check, unpack and process a log record.
4471 */
4472 STATIC int
4473 xlog_recover_process(
4474 struct xlog *log,
4475 struct hlist_head rhash[],
4476 struct xlog_rec_header *rhead,
4477 char *dp,
4478 int pass)
4479 {
4480 int error;
4481 __le32 crc;
4482
4483 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
4484
4485 /*
4486 * Nothing else to do if this is a CRC verification pass. Just return
4487 * if this is a record with a non-zero CRC. Unfortunately, mkfs always
4488 * sets h_crc to 0 so we must consider this valid even on v5 supers.
4489 * Otherwise, return EFSBADCRC on failure so the callers up the stack
4490 * know precisely what failed.
4491 */
4492 if (pass == XLOG_RECOVER_CRCPASS) {
4493 if (rhead->h_crc && crc != rhead->h_crc)
4494 return -EFSBADCRC;
4495 return 0;
4496 }
4497
4498 /*
4499 * We're in the normal recovery path. Issue a warning if and only if the
4500 * CRC in the header is non-zero. This is an advisory warning and the
4501 * zero CRC check prevents warnings from being emitted when upgrading
4502 * the kernel from one that does not add CRCs by default.
4503 */
4504 if (crc != rhead->h_crc) {
4505 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4506 xfs_alert(log->l_mp,
4507 "log record CRC mismatch: found 0x%x, expected 0x%x.",
4508 le32_to_cpu(rhead->h_crc),
4509 le32_to_cpu(crc));
4510 xfs_hex_dump(dp, 32);
4511 }
4512
4513 /*
4514 * If the filesystem is CRC enabled, this mismatch becomes a
4515 * fatal log corruption failure.
4516 */
4517 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
4518 return -EFSCORRUPTED;
4519 }
4520
4521 error = xlog_unpack_data(rhead, dp, log);
4522 if (error)
4523 return error;
4524
4525 return xlog_recover_process_data(log, rhash, rhead, dp, pass);
4526 }
4527
4528 STATIC int
4529 xlog_valid_rec_header(
4530 struct xlog *log,
4531 struct xlog_rec_header *rhead,
4532 xfs_daddr_t blkno)
4533 {
4534 int hlen;
4535
4536 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4537 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4538 XFS_ERRLEVEL_LOW, log->l_mp);
4539 return -EFSCORRUPTED;
4540 }
4541 if (unlikely(
4542 (!rhead->h_version ||
4543 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4544 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4545 __func__, be32_to_cpu(rhead->h_version));
4546 return -EIO;
4547 }
4548
4549 /* LR body must have data or it wouldn't have been written */
4550 hlen = be32_to_cpu(rhead->h_len);
4551 if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
4552 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4553 XFS_ERRLEVEL_LOW, log->l_mp);
4554 return -EFSCORRUPTED;
4555 }
4556 if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
4557 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4558 XFS_ERRLEVEL_LOW, log->l_mp);
4559 return -EFSCORRUPTED;
4560 }
4561 return 0;
4562 }
4563
4564 /*
4565 * Read the log from tail to head and process the log records found.
4566 * Handle the two cases where the tail and head are in the same cycle
4567 * and where the active portion of the log wraps around the end of
4568 * the physical log separately. The pass parameter is passed through
4569 * to the routines called to process the data and is not looked at
4570 * here.
4571 */
4572 STATIC int
4573 xlog_do_recovery_pass(
4574 struct xlog *log,
4575 xfs_daddr_t head_blk,
4576 xfs_daddr_t tail_blk,
4577 int pass,
4578 xfs_daddr_t *first_bad) /* out: first bad log rec */
4579 {
4580 xlog_rec_header_t *rhead;
4581 xfs_daddr_t blk_no;
4582 xfs_daddr_t rhead_blk;
4583 char *offset;
4584 xfs_buf_t *hbp, *dbp;
4585 int error = 0, h_size, h_len;
4586 int bblks, split_bblks;
4587 int hblks, split_hblks, wrapped_hblks;
4588 struct hlist_head rhash[XLOG_RHASH_SIZE];
4589
4590 ASSERT(head_blk != tail_blk);
4591 rhead_blk = 0;
4592
4593 /*
4594 * Read the header of the tail block and get the iclog buffer size from
4595 * h_size. Use this to tell how many sectors make up the log header.
4596 */
4597 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4598 /*
4599 * When using variable length iclogs, read first sector of
4600 * iclog header and extract the header size from it. Get a
4601 * new hbp that is the correct size.
4602 */
4603 hbp = xlog_get_bp(log, 1);
4604 if (!hbp)
4605 return -ENOMEM;
4606
4607 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4608 if (error)
4609 goto bread_err1;
4610
4611 rhead = (xlog_rec_header_t *)offset;
4612 error = xlog_valid_rec_header(log, rhead, tail_blk);
4613 if (error)
4614 goto bread_err1;
4615
4616 /*
4617 * xfsprogs has a bug where record length is based on lsunit but
4618 * h_size (iclog size) is hardcoded to 32k. Now that we
4619 * unconditionally CRC verify the unmount record, this means the
4620 * log buffer can be too small for the record and cause an
4621 * overrun.
4622 *
4623 * Detect this condition here. Use lsunit for the buffer size as
4624 * long as this looks like the mkfs case. Otherwise, return an
4625 * error to avoid a buffer overrun.
4626 */
4627 h_size = be32_to_cpu(rhead->h_size);
4628 h_len = be32_to_cpu(rhead->h_len);
4629 if (h_len > h_size) {
4630 if (h_len <= log->l_mp->m_logbsize &&
4631 be32_to_cpu(rhead->h_num_logops) == 1) {
4632 xfs_warn(log->l_mp,
4633 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
4634 h_size, log->l_mp->m_logbsize);
4635 h_size = log->l_mp->m_logbsize;
4636 } else {
4637 error = -EFSCORRUPTED;
     goto bread_err1;
     }
4638 }
4639
4640 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4641 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4642 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4643 if (h_size % XLOG_HEADER_CYCLE_SIZE)
4644 hblks++;
4645 xlog_put_bp(hbp);
4646 hbp = xlog_get_bp(log, hblks);
4647 } else {
4648 hblks = 1;
4649 }
4650 } else {
4651 ASSERT(log->l_sectBBsize == 1);
4652 hblks = 1;
4653 hbp = xlog_get_bp(log, 1);
4654 h_size = XLOG_BIG_RECORD_BSIZE;
4655 }
4656
4657 if (!hbp)
4658 return -ENOMEM;
4659 dbp = xlog_get_bp(log, BTOBB(h_size));
4660 if (!dbp) {
4661 xlog_put_bp(hbp);
4662 return -ENOMEM;
4663 }
4664
4665 memset(rhash, 0, sizeof(rhash));
4666 blk_no = rhead_blk = tail_blk;
4667 if (tail_blk > head_blk) {
4668 /*
4669 * Perform recovery around the end of the physical log.
4670 * When the head is not on the same cycle number as the tail,
4671 * we can't do a sequential recovery.
4672 */
4673 while (blk_no < log->l_logBBsize) {
4674 /*
4675 * Check for header wrapping around physical end-of-log
4676 */
4677 offset = hbp->b_addr;
4678 split_hblks = 0;
4679 wrapped_hblks = 0;
4680 if (blk_no + hblks <= log->l_logBBsize) {
4681 /* Read header in one read */
4682 error = xlog_bread(log, blk_no, hblks, hbp,
4683 &offset);
4684 if (error)
4685 goto bread_err2;
4686 } else {
4687 /* This LR is split across physical log end */
4688 if (blk_no != log->l_logBBsize) {
4689 /* some data before physical log end */
4690 ASSERT(blk_no <= INT_MAX);
4691 split_hblks = log->l_logBBsize - (int)blk_no;
4692 ASSERT(split_hblks > 0);
4693 error = xlog_bread(log, blk_no,
4694 split_hblks, hbp,
4695 &offset);
4696 if (error)
4697 goto bread_err2;
4698 }
4699
4700 /*
4701 * Note: this black magic still works with
4702 * large sector sizes (non-512) only because:
4703 * - we increased the buffer size originally
4704 * by 1 sector giving us enough extra space
4705 * for the second read;
4706 * - the log start is guaranteed to be sector
4707 * aligned;
4708 * - we read the log end (LR header start)
4709 * _first_, then the log start (LR header end)
4710 * - order is important.
4711 */
4712 wrapped_hblks = hblks - split_hblks;
4713 error = xlog_bread_offset(log, 0,
4714 wrapped_hblks, hbp,
4715 offset + BBTOB(split_hblks));
4716 if (error)
4717 goto bread_err2;
4718 }
4719 rhead = (xlog_rec_header_t *)offset;
4720 error = xlog_valid_rec_header(log, rhead,
4721 split_hblks ? blk_no : 0);
4722 if (error)
4723 goto bread_err2;
4724
4725 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4726 blk_no += hblks;
4727
4728 /* Read in data for log record */
4729 if (blk_no + bblks <= log->l_logBBsize) {
4730 error = xlog_bread(log, blk_no, bblks, dbp,
4731 &offset);
4732 if (error)
4733 goto bread_err2;
4734 } else {
4735 /* This log record is split across the
4736 * physical end of log */
4737 offset = dbp->b_addr;
4738 split_bblks = 0;
4739 if (blk_no != log->l_logBBsize) {
4740 /* some data is before the physical
4741 * end of log */
4742 ASSERT(!wrapped_hblks);
4743 ASSERT(blk_no <= INT_MAX);
4744 split_bblks =
4745 log->l_logBBsize - (int)blk_no;
4746 ASSERT(split_bblks > 0);
4747 error = xlog_bread(log, blk_no,
4748 split_bblks, dbp,
4749 &offset);
4750 if (error)
4751 goto bread_err2;
4752 }
4753
4754 /*
4755 * Note: this black magic still works with
4756 * large sector sizes (non-512) only because:
4757 * - we increased the buffer size originally
4758 * by 1 sector giving us enough extra space
4759 * for the second read;
4760 * - the log start is guaranteed to be sector
4761 * aligned;
4762 * - we read the log end (LR header start)
4763 * _first_, then the log start (LR header end)
4764 * - order is important.
4765 */
4766 error = xlog_bread_offset(log, 0,
4767 bblks - split_bblks, dbp,
4768 offset + BBTOB(split_bblks));
4769 if (error)
4770 goto bread_err2;
4771 }
4772
4773 error = xlog_recover_process(log, rhash, rhead, offset,
4774 pass);
4775 if (error)
4776 goto bread_err2;
4777
4778 blk_no += bblks;
4779 rhead_blk = blk_no;
4780 }
4781
4782 ASSERT(blk_no >= log->l_logBBsize);
4783 blk_no -= log->l_logBBsize;
4784 rhead_blk = blk_no;
4785 }
4786
4787 /* read first part of physical log */
4788 while (blk_no < head_blk) {
4789 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4790 if (error)
4791 goto bread_err2;
4792
4793 rhead = (xlog_rec_header_t *)offset;
4794 error = xlog_valid_rec_header(log, rhead, blk_no);
4795 if (error)
4796 goto bread_err2;
4797
4798 /* blocks in data section */
4799 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4800 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
4801 &offset);
4802 if (error)
4803 goto bread_err2;
4804
4805 error = xlog_recover_process(log, rhash, rhead, offset, pass);
4806 if (error)
4807 goto bread_err2;
4808
4809 blk_no += bblks + hblks;
4810 rhead_blk = blk_no;
4811 }
4812
4813 bread_err2:
4814 xlog_put_bp(dbp);
4815 bread_err1:
4816 xlog_put_bp(hbp);
4817
4818 if (error && first_bad)
4819 *first_bad = rhead_blk;
4820
4821 return error;
4822 }
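
/*
 * Editorial example (assumed geometry): with l_logBBsize = 1000,
 * tail_blk = 900 and head_blk = 100, tail > head so the first loop above
 * walks blocks 900..999. A record header or body that straddles block
 * 1000 is assembled from two reads: the piece before the physical end,
 * then the wrapped remainder at block 0 via xlog_bread_offset(). blk_no
 * then wraps to blk_no - l_logBBsize and the second loop recovers the
 * sequential region up to head_blk.
 */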
4823
4824 /*
4825 * Do the recovery of the log. We actually do this in two phases.
4826 * The two passes are necessary in order to implement the function
4827 * of cancelling a record written into the log. The first pass
4828 * determines those things which have been cancelled, and the
4829 * second pass replays log items normally except for those which
4830 * have been cancelled. The handling of the replay and cancellations
4831 * takes place in the log item type specific routines.
4832 *
4833 * The table of items which have cancel records in the log is allocated
4834 * and freed at this level, since only here do we know when all of
4835 * the log recovery has been completed.
4836 */
4837 STATIC int
4838 xlog_do_log_recovery(
4839 struct xlog *log,
4840 xfs_daddr_t head_blk,
4841 xfs_daddr_t tail_blk)
4842 {
4843 int error, i;
4844
4845 ASSERT(head_blk != tail_blk);
4846
4847 /*
4848 * First do a pass to find all of the cancelled buf log items.
4849 * Store them in the buf_cancel_table for use in the second pass.
4850 */
4851 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4852 sizeof(struct list_head),
4853 KM_SLEEP);
4854 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4855 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4856
4857 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4858 XLOG_RECOVER_PASS1, NULL);
4859 if (error != 0) {
4860 kmem_free(log->l_buf_cancel_table);
4861 log->l_buf_cancel_table = NULL;
4862 return error;
4863 }
4864 /*
4865 * Then do a second pass to actually recover the items in the log.
4866 * When it is complete free the table of buf cancel items.
4867 */
4868 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4869 XLOG_RECOVER_PASS2, NULL);
4870 #ifdef DEBUG
4871 if (!error) {
4872 int i;
4873
4874 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4875 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4876 }
4877 #endif /* DEBUG */
4878
4879 kmem_free(log->l_buf_cancel_table);
4880 log->l_buf_cancel_table = NULL;
4881
4882 return error;
4883 }
4884
4885 /*
4886 * Do the actual recovery
4887 */
4888 STATIC int
4889 xlog_do_recover(
4890 struct xlog *log,
4891 xfs_daddr_t head_blk,
4892 xfs_daddr_t tail_blk)
4893 {
4894 int error;
4895 xfs_buf_t *bp;
4896 xfs_sb_t *sbp;
4897
4898 /*
4899 * First replay the images in the log.
4900 */
4901 error = xlog_do_log_recovery(log, head_blk, tail_blk);
4902 if (error)
4903 return error;
4904
4905 /*
4906 * If IO errors happened during recovery, bail out.
4907 */
4908 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4909 return -EIO;
4910 }
4911
4912 /*
4913 * We now update the tail_lsn since much of the recovery has completed
4914 * and there may be space available to use. If there were no extent
4915 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
4916 * be the last_sync_lsn. This was set in xlog_find_tail to be the
4917 * lsn of the last known good LR on disk. If there are extent frees
4918 * or iunlinks, they will have some entries in the AIL, so we look at
4919 * the AIL to determine how to set the tail_lsn.
4920 */
4921 xlog_assign_tail_lsn(log->l_mp);
4922
4923 /*
4924 * Now that we've finished replaying all buffer and inode
4925 * updates, re-read in the superblock and reverify it.
4926 */
4927 bp = xfs_getsb(log->l_mp, 0);
4928 bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
4929 ASSERT(!(bp->b_flags & XBF_WRITE));
4930 bp->b_flags |= XBF_READ;
4931 bp->b_ops = &xfs_sb_buf_ops;
4932
4933 error = xfs_buf_submit_wait(bp);
4934 if (error) {
4935 if (!XFS_FORCED_SHUTDOWN(log->l_mp)) {
4936 xfs_buf_ioerror_alert(bp, __func__);
4937 ASSERT(0);
4938 }
4939 xfs_buf_relse(bp);
4940 return error;
4941 }
4942
4943 /* Convert superblock from on-disk format */
4944 sbp = &log->l_mp->m_sb;
4945 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4946 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4947 ASSERT(xfs_sb_good_version(sbp));
4948 xfs_reinit_percpu_counters(log->l_mp);
4949
4950 xfs_buf_relse(bp);
4951
4953 xlog_recover_check_summary(log);
4954
4955 /* Normal transactions can now occur */
4956 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4957 return 0;
4958 }
4959
4960 /*
4961 * Perform recovery and re-initialize some log variables in xlog_find_tail.
4962 *
4963 * Return error or zero.
4964 */
4965 int
4966 xlog_recover(
4967 struct xlog *log)
4968 {
4969 xfs_daddr_t head_blk, tail_blk;
4970 int error;
4971
4972 /* find the tail of the log */
4973 error = xlog_find_tail(log, &head_blk, &tail_blk);
4974 if (error)
4975 return error;
4976
4977 /*
4978 * The superblock was read before the log was available and thus the LSN
4979 * could not be verified. Check the superblock LSN against the current
4980 * LSN now that it's known.
4981 */
4982 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
4983 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
4984 return -EINVAL;
4985
4986 if (tail_blk != head_blk) {
4987 /* There used to be a comment here:
4988 *
4989 * disallow recovery on read-only mounts. note -- mount
4990 * checks for ENOSPC and turns it into an intelligent
4991 * error message.
4992 * ...but this is no longer true. Now, unless you specify
4993 * NORECOVERY (in which case this function would never be
4994 * called), we just go ahead and recover. We do this all
4995 * under the vfs layer, so we can get away with it unless
4996 * the device itself is read-only, in which case we fail.
4997 */
4998 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4999 return error;
5000 }
5001
5002 /*
5003 * Version 5 superblock log feature mask validation. We know the
5004 * log is dirty so check if there are any unknown log features
5005 * in what we need to recover. If there are unknown features
5006 * (e.g. unsupported transactions), then simply reject the
5007 * attempt at recovery before touching anything.
5008 */
5009 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5010 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5011 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5012 xfs_warn(log->l_mp,
5013 "Superblock has unknown incompatible log features (0x%x) enabled.",
5014 (log->l_mp->m_sb.sb_features_log_incompat &
5015 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5016 xfs_warn(log->l_mp,
5017 "The log cannot be fully and/or safely recovered by this kernel.");
5018 xfs_warn(log->l_mp,
5019 "Please recover the log on a kernel that supports the unknown features.");
5020 return -EINVAL;
5021 }
5022
5023 /*
5024 * Delay log recovery if the debug hook is set. This is debug
5025 * instrumentation to coordinate simulation of I/O failures with
5026 * log recovery.
5027 */
5028 if (xfs_globals.log_recovery_delay) {
5029 xfs_notice(log->l_mp,
5030 "Delaying log recovery for %d seconds.",
5031 xfs_globals.log_recovery_delay);
5032 msleep(xfs_globals.log_recovery_delay * 1000);
5033 }
5034
5035 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5036 log->l_mp->m_logname ? log->l_mp->m_logname
5037 : "internal");
5038
5039 error = xlog_do_recover(log, head_blk, tail_blk);
5040 log->l_flags |= XLOG_RECOVERY_NEEDED;
5041 }
5042 return error;
5043 }
5044
5045 /*
5046 * In the first part of recovery we replay inodes and buffers and build
5047 * up the list of extent free items which need to be processed. Here
5048 * we process the extent free items and clean up the on disk unlinked
5049 * inode lists. This is separated from the first part of recovery so
5050 * that the root and real-time bitmap inodes can be read in from disk in
5051 * between the two stages. This is necessary so that we can free space
5052 * in the real-time portion of the file system.
5053 */
5054 int
5055 xlog_recover_finish(
5056 struct xlog *log)
5057 {
5058 /*
5059 * Now we're ready to do the transactions needed for the
5060 * rest of recovery. Start with completing all the extent
5061 * free intent records and then process the unlinked inode
5062 * lists. At this point, we essentially run in normal mode
5063 * except that we're still performing recovery actions
5064 * rather than accepting new requests.
5065 */
5066 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5067 int error;
5068 error = xlog_recover_process_efis(log);
5069 if (error) {
5070 xfs_alert(log->l_mp, "Failed to recover EFIs");
5071 return error;
5072 }
5073 /*
5074 * Sync the log to get all the EFIs out of the AIL.
5075 * This isn't absolutely necessary, but it helps in
5076 * case the unlink transactions would have problems
5077 * pushing the EFIs out of the way.
5078 */
5079 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5080
5081 xlog_recover_process_iunlinks(log);
5082
5083 xlog_recover_check_summary(log);
5084
5085 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5086 log->l_mp->m_logname ? log->l_mp->m_logname
5087 : "internal");
5088 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5089 } else {
5090 xfs_info(log->l_mp, "Ending clean mount");
5091 }
5092 return 0;
5093 }
5094
5095 int
5096 xlog_recover_cancel(
5097 struct xlog *log)
5098 {
5099 int error = 0;
5100
5101 if (log->l_flags & XLOG_RECOVERY_NEEDED)
5102 error = xlog_recover_cancel_efis(log);
5103
5104 return error;
5105 }
5106
5107 #if defined(DEBUG)
5108 /*
5109 * Read all of the agf and agi counters and check that they
5110 * are consistent with the superblock counters.
5111 */
5112 void
5113 xlog_recover_check_summary(
5114 struct xlog *log)
5115 {
5116 xfs_mount_t *mp;
5117 xfs_agf_t *agfp;
5118 xfs_buf_t *agfbp;
5119 xfs_buf_t *agibp;
5120 xfs_agnumber_t agno;
5121 __uint64_t freeblks;
5122 __uint64_t itotal;
5123 __uint64_t ifree;
5124 int error;
5125
5126 mp = log->l_mp;
5127
5128 freeblks = 0LL;
5129 itotal = 0LL;
5130 ifree = 0LL;
5131 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5132 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5133 if (error) {
5134 xfs_alert(mp, "%s agf read failed agno %d error %d",
5135 __func__, agno, error);
5136 } else {
5137 agfp = XFS_BUF_TO_AGF(agfbp);
5138 freeblks += be32_to_cpu(agfp->agf_freeblks) +
5139 be32_to_cpu(agfp->agf_flcount);
5140 xfs_buf_relse(agfbp);
5141 }
5142
5143 error = xfs_read_agi(mp, NULL, agno, &agibp);
5144 if (error) {
5145 xfs_alert(mp, "%s agi read failed agno %d error %d",
5146 __func__, agno, error);
5147 } else {
5148 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
5149
5150 itotal += be32_to_cpu(agi->agi_count);
5151 ifree += be32_to_cpu(agi->agi_freecount);
5152 xfs_buf_relse(agibp);
5153 }
5154 }
5155 }
5156 #endif /* DEBUG */