/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"

STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void	xlog_recover_insert_item_backq(xlog_recover_item_t **q,
					       xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void	xlog_recover_check_summary(xlog_t *);
#else
#define	xlog_recover_check_summary(log)
#endif


/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)	\
	( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
	((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)	((bno) & ~(log)->l_sectbb_mask)
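/*
 * Example: with 4k hardware sectors on a 512-byte basic block device,
 * l_sectbb_mask is 7, so XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 3) yields 8
 * and XLOG_SECTOR_ROUNDDOWN_BLKNO(log, 13) yields 8: block counts are
 * padded up and block numbers pulled back to a sector boundary.
 */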

xfs_buf_t *
xlog_get_bp(
	xlog_t		*log,
	int		nbblks)
{
	if (nbblks <= 0 || nbblks > log->l_logBBsize) {
		xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
		XFS_ERROR_REPORT("xlog_get_bp(1)",
				 XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	if (log->l_sectbb_log) {
		if (nbblks > 1)
			nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
	}
	return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

STATIC xfs_caddr_t
xlog_align(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	xfs_caddr_t	ptr;

	if (!log->l_sectbb_log)
		return XFS_BUF_PTR(bp);

	ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
	ASSERT(XFS_BUF_SIZE(bp) >=
		BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
	return ptr;
}

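/*
 * Example for xlog_align(): reading block 13 with 4k sectors actually
 * pulls blocks 8-15 into bp (see the rounding in xlog_bread_noalign()
 * below), so the caller's data starts BBTOB(13 & 7) = BBTOB(5) bytes
 * into the buffer.
 */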

/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	int		error;

	if (nbblks <= 0 || nbblks > log->l_logBBsize) {
		xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
		XFS_ERROR_REPORT("xlog_bread(1)",
				 XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	if (log->l_sectbb_log) {
		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
	}

	ASSERT(nbblks > 0);
	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
	ASSERT(bp);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	XFS_BUF_BUSY(bp);
	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

	xfsbdstrat(log->l_mp, bp);
	error = xfs_iowait(bp);
	if (error)
		xfs_ioerror_alert("xlog_bread", log->l_mp,
				  bp, XFS_BUF_ADDR(bp));
	return error;
}

STATIC int
xlog_bread(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	int		error;

	if (nbblks <= 0 || nbblks > log->l_logBBsize) {
		xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
		XFS_ERROR_REPORT("xlog_bwrite(1)",
				 XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	if (log->l_sectbb_log) {
		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
	}

	ASSERT(nbblks > 0);
	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	XFS_BUF_BUSY(bp);
	XFS_BUF_HOLD(bp);
	XFS_BUF_PSEMA(bp, PRIBIO);
	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

	if ((error = xfs_bwrite(log->l_mp, bp)))
		xfs_ioerror_alert("xlog_bwrite", log->l_mp,
				  bp, XFS_BUF_ADDR(bp));
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	int			b;

	cmn_err(CE_DEBUG, "%s:  SB : uuid = ", __func__);
	for (b = 0; b < 16; b++)
		cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
	cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
	cmn_err(CE_DEBUG, "    log : uuid = ");
	for (b = 0; b < 16; b++)
		cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
	cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
		xlog_warn(
	"XFS: dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xlog_warn(
	"XFS: dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xlog_warn("XFS: nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xlog_warn("XFS: log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (XFS_BUF_GETERROR(bp)) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_ioerror_alert("xlog_recover_iodone",
				  bp->b_mount, bp, XFS_BUF_ADDR(bp));
		xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
	}
	bp->b_mount = NULL;
	XFS_BUF_CLR_IODONE_FUNC(bp);
	xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	xlog_t		*log,
	xfs_buf_t	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	uint		mid_cycle;
	int		error;

	mid_blk = BLK_AVG(first_blk, *last_blk);
	while (mid_blk != first_blk && mid_blk != *last_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle) {
			*last_blk = mid_blk;
			/* last_half_cycle == mid_cycle */
		} else {
			first_blk = mid_blk;
			/* first_half_cycle == mid_cycle */
		}
		mid_blk = BLK_AVG(first_blk, *last_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
	       (mid_blk == *last_blk && mid_blk-1 == first_blk));

	return 0;
}

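/*
 * Example: searching an 8-block log stamped 4 4 4 4 4 3 3 3 for the
 * first block with cycle 3 (first_blk = 0, *last_blk = 7) narrows the
 * window 0..7 -> 3..7 -> 3..5 -> 4..5 and leaves *last_blk = 5, the
 * first block of the last_half_cycle region.
 */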
/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
	xlog_t		*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	bufblks = 1 << ffs(nbblks);

	while (!(bp = xlog_get_bp(log, bufblks))) {
		/* can't get enough memory to do everything in one big buffer */
		bufblks >>= 1;
		if (bufblks <= log->l_sectbb_log)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

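/*
 * The allocation loop above sizes the scan buffer from nbblks and
 * halves the request under memory pressure; the range is then walked
 * bufblks blocks at a time, checking the cycle stamped in each basic
 * block.
 */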
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	xlog_t			*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xlog_warn(
		"XFS: Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	xlog_t		*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xlog_warn("XFS: totally zeroed log");
		}

		return 0;
	} else if (error) {
		xlog_warn("XFS: empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ...
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * or
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		start_blk = log_bbnum - num_scan_bblks + head_blk;
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks - head_blk >= 0);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto bad_blk;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

 bad_blk:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - num_scan_bblks + head_blk;
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xlog_warn("XFS: failed to find log head");
	return error;
}

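/*
 * Worked example for xlog_find_head(): an 8-block log stamped
 * 2 2 2 2 1 1 1 1 has first_half_cycle = 2 and last_half_cycle = 1,
 * so the binary search places head_blk at block 4, the first block of
 * cycle 1; the backwards scans then confirm no stray cycle-1 blocks
 * precede it and no partial log record spans it.
 */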
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
int
xlog_find_tail(
	xlog_t			*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto bread_err;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto exit;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto bread_err;

		if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto bread_err;

			if (XLOG_HEADER_MAGIC_NUM ==
			    be32_to_cpu(*(__be32 *)offset)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
		ASSERT(0);
		xlog_put_bp(bp);	/* don't leak the buffer on this error path */
		return XFS_ERROR(EIO);
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
		log->l_curr_cycle++;
	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
	log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
	log->l_grant_reserve_cycle = log->l_curr_cycle;
	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
	log->l_grant_write_cycle = log->l_curr_cycle;
	log->l_grant_write_bytes = BBTOB(log->l_curr_block);

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = log->l_tail_lsn;
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto bread_err;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			log->l_tail_lsn =
				xlog_assign_lsn(log->l_curr_cycle,
						after_umount_blk);
			log->l_last_sync_lsn =
				xlog_assign_lsn(log->l_curr_cycle,
						after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
		error = xlog_clear_stale_blocks(log, tail_lsn);
	}

bread_err:
exit:
	xlog_put_bp(bp);

	if (error)
		xlog_warn("XFS: failed to locate log tail");
	return error;
}

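/*
 * Block math above, by example: with the record header at block i,
 * hblks = 1 and h_len = 512 bytes (BTOBB(512) = 1), the unmount
 * record's single payload block is i + 1 (umount_data_blk) and the
 * next record would begin at i + 2 (after_umount_blk), both taken
 * modulo the physical log size to handle wrap.
 */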
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	xlog_t		*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
		error = XFS_ERROR(EINVAL);
		goto bp_err;	/* free bp on the way out, don't leak it */
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}

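/*
 * Example: a log stamped 1 1 1 0 0 is partially zeroed; the search
 * above lands *blk_no on block 3, the first cycle-0 block (possibly
 * backed up over a partial record write), and the routine returns -1,
 * telling xlog_find_head() to treat that block as the head.
 */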
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	xlog_t			*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	xlog_t		*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	bufblks = 1 << ffs(blocks);
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks <= log->l_sectbb_log)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = XFS_BUF_PTR(bp);
			balign = BBTOB(ealign - start_block);
			error = XFS_BUF_SET_PTR(bp, offset + balign,
						BBTOB(sectbb));
			if (error)
				break;

			error = xlog_bread_noalign(log, ealign, sectbb, bp);
			if (error)
				break;

			error = XFS_BUF_SET_PTR(bp, offset, bufblks);
			if (error)
				break;
		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}

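/*
 * Read-modify-write example for xlog_write_log_records(): with 4-block
 * sectors and start_block = 6, balign is 4, so the routine first reads
 * the sector holding blocks 4-7 and sets j = 2; dummy records are then
 * stamped starting at block 6's offset within the buffer, and the
 * subsequent write (itself sector-aligned by xlog_bwrite()) preserves
 * the old contents of blocks 4 and 5.
 */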
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	xlog_t		*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

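/*
 * Wrap example for xlog_clear_stale_blocks(): with a 100-block log,
 * head_block = 90 and max_distance = 30, the first call stamps blocks
 * 90-99 with cycle head_cycle - 1 and the second stamps blocks 0-19
 * with head_cycle, exactly covering the 30 blocks in front of the head.
 */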
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	xlog_recover_t	*q,
	xlog_tid_t	tid)
{
	xlog_recover_t	*p = q;

	while (p != NULL) {
		if (p->r_log_tid == tid)
			break;
		p = p->r_next;
	}
	return p;
}

STATIC void
xlog_recover_put_hashq(
	xlog_recover_t	**q,
	xlog_recover_t	*trans)
{
	trans->r_next = *q;
	*q = trans;
}

STATIC void
xlog_recover_add_item(
	xlog_recover_item_t	**itemq)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	xlog_recover_insert_item_backq(itemq, item);
}

STATIC int
xlog_recover_add_to_cont_trans(
	xlog_recover_t		*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	item = trans->r_itemq;
	if (item == NULL) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	item = item->ri_prev;

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	xlog_recover_t		*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	item = trans->r_itemq;
	if (item == NULL) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xlog_warn("XFS: xlog_recover_add_to_trans: "
				  "bad header magic number");
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	if (item->ri_prev->ri_total != 0 &&
	    item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
		xlog_recover_add_item(&trans->r_itemq);
	}
	item = trans->r_itemq;
	item = item->ri_prev;

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xlog_warn(
	"XFS: bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	return 0;
}

STATIC void
xlog_recover_new_tid(
	xlog_recover_t		**q,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = lsn;
	xlog_recover_put_hashq(q, trans);
}

STATIC int
xlog_recover_unlink_tid(
	xlog_recover_t		**q,
	xlog_recover_t		*trans)
{
	xlog_recover_t		*tp;
	int			found = 0;

	ASSERT(trans != NULL);
	if (trans == *q) {
		*q = (*q)->r_next;
	} else {
		tp = *q;
		while (tp) {
			if (tp->r_next == trans) {
				found = 1;
				break;
			}
			tp = tp->r_next;
		}
		if (!found) {
			xlog_warn(
			     "XFS: xlog_recover_unlink_tid: trans not found");
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		tp->r_next = tp->r_next->r_next;
	}
	return 0;
}

STATIC void
xlog_recover_insert_item_backq(
	xlog_recover_item_t	**q,
	xlog_recover_item_t	*item)
{
	if (*q == NULL) {
		item->ri_prev = item->ri_next = item;
		*q = item;
	} else {
		item->ri_next = *q;
		item->ri_prev = (*q)->ri_prev;
		(*q)->ri_prev = item;
		item->ri_prev->ri_next = item;
	}
}

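/*
 * The item queue is a circular doubly-linked list: insert_item_backq()
 * appends at the tail (just before *q), while insert_item_frontq()
 * below does the same insertion and then advances *q so that the new
 * item becomes the head.
 */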
STATIC void
xlog_recover_insert_item_frontq(
	xlog_recover_item_t	**q,
	xlog_recover_item_t	*item)
{
	xlog_recover_insert_item_backq(q, item);
	*q = item;
}

STATIC int
xlog_recover_reorder_trans(
	xlog_recover_t		*trans)
{
	xlog_recover_item_t	*first_item, *itemq, *itemq_next;
	xfs_buf_log_format_t	*buf_f;
	ushort			flags = 0;

	first_item = itemq = trans->r_itemq;
	trans->r_itemq = NULL;
	do {
		itemq_next = itemq->ri_next;
		buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;

		switch (ITEM_TYPE(itemq)) {
		case XFS_LI_BUF:
			flags = buf_f->blf_flags;
			if (!(flags & XFS_BLI_CANCEL)) {
				xlog_recover_insert_item_frontq(&trans->r_itemq,
								itemq);
				break;
			}
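			/* fall through: cancelled buffers queue at the back */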
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
			break;
		default:
			xlog_warn(
	"XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		itemq = itemq_next;
	} while (first_item != itemq);
	return 0;
}

/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC void
xlog_recover_do_buffer_pass1(
	xlog_t			*log,
	xfs_buf_log_format_t	*buf_f)
{
	xfs_buf_cancel_t	*bcp;
	xfs_buf_cancel_t	*nextp;
	xfs_buf_cancel_t	*prevp;
	xfs_buf_cancel_t	**bucket;
	xfs_daddr_t		blkno = 0;
	uint			len = 0;
	ushort			flags = 0;

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
		blkno = buf_f->blf_blkno;
		len = buf_f->blf_len;
		flags = buf_f->blf_flags;
		break;
	}

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(flags & XFS_BLI_CANCEL))
		return;

	/*
	 * Insert an xfs_buf_cancel record into the hash table of
	 * them.  If there is already an identical record, bump
	 * its reference count.
	 */
	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
					  XLOG_BC_TABLE_SIZE];
	/*
	 * If the hash bucket is empty then just insert a new record into
	 * the bucket.
	 */
	if (*bucket == NULL) {
		bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
						     KM_SLEEP);
		bcp->bc_blkno = blkno;
		bcp->bc_len = len;
		bcp->bc_refcount = 1;
		bcp->bc_next = NULL;
		*bucket = bcp;
		return;
	}

	/*
	 * The hash bucket is not empty, so search for duplicates of our
	 * record.  If we find one then just bump its refcount.  If not
	 * then add us at the end of the list.
	 */
	prevp = NULL;
	nextp = *bucket;
	while (nextp != NULL) {
		if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
			nextp->bc_refcount++;
			return;
		}
		prevp = nextp;
		nextp = nextp->bc_next;
	}
	ASSERT(prevp != NULL);
	bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
					     KM_SLEEP);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	bcp->bc_next = NULL;
	prevp->bc_next = bcp;
}

/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table.  If it does then return 1
 * so that it will be cancelled, otherwise return 0.  If the buffer is
 * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
 * the refcount on the entry in the table and remove it from the table
 * if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its
 * last occurrence in the log so that if the same buffer is re-used
 * again after its last cancellation we actually replay the changes
 * made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	xlog_t			*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	xfs_buf_cancel_t	*bcp;
	xfs_buf_cancel_t	*prevp;
	xfs_buf_cancel_t	**bucket;

	if (log->l_buf_cancel_table == NULL) {
		/*
		 * There is nothing in the table built in pass one,
		 * so this buffer must not be cancelled.
		 */
		ASSERT(!(flags & XFS_BLI_CANCEL));
		return 0;
	}

	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
					  XLOG_BC_TABLE_SIZE];
	bcp = *bucket;
	if (bcp == NULL) {
		/*
		 * There is no corresponding entry in the table built
		 * in pass one, so this buffer has not been cancelled.
		 */
		ASSERT(!(flags & XFS_BLI_CANCEL));
		return 0;
	}

	/*
	 * Search for an entry in the buffer cancel table that
	 * matches our buffer.
	 */
	prevp = NULL;
	while (bcp != NULL) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
			/*
			 * We've got a match, so return 1 so that the
			 * recovery of this buffer is cancelled.
			 * If this buffer is actually a buffer cancel
			 * log item, then decrement the refcount on the
			 * one in the table and remove it if this is the
			 * last reference.
			 */
			if (flags & XFS_BLI_CANCEL) {
				bcp->bc_refcount--;
				if (bcp->bc_refcount == 0) {
					if (prevp == NULL) {
						*bucket = bcp->bc_next;
					} else {
						prevp->bc_next = bcp->bc_next;
					}
					kmem_free(bcp);
				}
			}
			return 1;
		}
		prevp = bcp;
		bcp = bcp->bc_next;
	}
	/*
	 * We didn't find a corresponding entry in the table, so
	 * return 0 so that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLI_CANCEL));
	return 0;
}

STATIC int
xlog_recover_do_buffer_pass2(
	xlog_t			*log,
	xfs_buf_log_format_t	*buf_f)
{
	xfs_daddr_t		blkno = 0;
	ushort			flags = 0;
	uint			len = 0;

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
		blkno = buf_f->blf_blkno;
		flags = buf_f->blf_flags;
		len = buf_f->blf_len;
		break;
	}

	return xlog_check_buffer_cancelled(log, blkno, len, flags);
}

1812/*
1813 * Perform recovery for a buffer full of inodes. In these buffers,
1814 * the only data which should be recovered is that which corresponds
1815 * to the di_next_unlinked pointers in the on disk inode structures.
1816 * The rest of the data for the inodes is always logged through the
1817 * inodes themselves rather than the inode buffer and is recovered
1818 * in xlog_recover_do_inode_trans().
1819 *
1820 * The only time when buffers full of inodes are fully recovered is
1821 * when the buffer is full of newly allocated inodes. In this case
1822 * the buffer will not be marked as an inode buffer and so will be
1823 * sent to xlog_recover_do_reg_buffer() below during recovery.
1824 */
1825STATIC int
1826xlog_recover_do_inode_buffer(
1827 xfs_mount_t *mp,
1828 xlog_recover_item_t *item,
1829 xfs_buf_t *bp,
1830 xfs_buf_log_format_t *buf_f)
1831{
1832 int i;
1833 int item_index;
1834 int bit;
1835 int nbits;
1836 int reg_buf_offset;
1837 int reg_buf_bytes;
1838 int next_unlinked_offset;
1839 int inodes_per_buf;
1840 xfs_agino_t *logged_nextp;
1841 xfs_agino_t *buffer_nextp;
1842 unsigned int *data_map = NULL;
1843 unsigned int map_size = 0;
1844
1845 switch (buf_f->blf_type) {
1846 case XFS_LI_BUF:
1847 data_map = buf_f->blf_data_map;
1848 map_size = buf_f->blf_map_size;
1849 break;
1850 }
1851 /*
1852 * Set the variables corresponding to the current region to
1853 * 0 so that we'll initialize them on the first pass through
1854 * the loop.
1855 */
1856 reg_buf_offset = 0;
1857 reg_buf_bytes = 0;
1858 bit = 0;
1859 nbits = 0;
1860 item_index = 0;
1861 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1862 for (i = 0; i < inodes_per_buf; i++) {
1863 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1864 offsetof(xfs_dinode_t, di_next_unlinked);
1865
1866 while (next_unlinked_offset >=
1867 (reg_buf_offset + reg_buf_bytes)) {
1868 /*
1869 * The next di_next_unlinked field is beyond
1870 * the current logged region. Find the next
1871 * logged region that contains or is beyond
1872 * the current di_next_unlinked field.
1873 */
1874 bit += nbits;
1875 bit = xfs_next_bit(data_map, map_size, bit);
1876
1877 /*
1878 * If there are no more logged regions in the
1879 * buffer, then we're done.
1880 */
1881 if (bit == -1) {
1882 return 0;
1883 }
1884
1885 nbits = xfs_contig_bits(data_map, map_size,
1886 bit);
1887 ASSERT(nbits > 0);
1888 reg_buf_offset = bit << XFS_BLI_SHIFT;
1889 reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1890 item_index++;
1891 }
1892
1893 /*
1894 * If the current logged region starts after the current
1895 * di_next_unlinked field, then move on to the next
1896 * di_next_unlinked field.
1897 */
1898 if (next_unlinked_offset < reg_buf_offset) {
1899 continue;
1900 }
1901
1902 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1903 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1904 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1905
1906 /*
1907 * The current logged region contains a copy of the
1908 * current di_next_unlinked field. Extract its value
1909 * and copy it to the buffer copy.
1910 */
1911 logged_nextp = (xfs_agino_t *)
1912 ((char *)(item->ri_buf[item_index].i_addr) +
1913 (next_unlinked_offset - reg_buf_offset));
1914 if (unlikely(*logged_nextp == 0)) {
1915 xfs_fs_cmn_err(CE_ALERT, mp,
1916 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
1917 item, bp);
1918 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1919 XFS_ERRLEVEL_LOW, mp);
1920 return XFS_ERROR(EFSCORRUPTED);
1921 }
1922
1923 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1924 next_unlinked_offset);
87c199c2 1925 *buffer_nextp = *logged_nextp;
1926 }
1927
1928 return 0;
1929}
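/*
 * Editor's sketch (an addition, not original source): the offset
 * arithmetic above assumes inodes are packed back to back in the
 * buffer.  For inode i,
 *
 *	next_unlinked_offset = i * mp->m_sb.sb_inodesize +
 *			offsetof(xfs_dinode_t, di_next_unlinked);
 *
 * while a logged region starting at bitmap bit 'bit' with 'nbits'
 * contiguous bits covers the byte range
 *
 *	[bit << XFS_BLI_SHIFT, (bit + nbits) << XFS_BLI_SHIFT)
 *
 * so the loop walks both sequences in lockstep and copies a single
 * xfs_agino_t per inode whenever the ranges overlap.
 */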
1930
1931/*
1932 * Perform a 'normal' buffer recovery. Each logged region of the
1933 * buffer should be copied over the corresponding region in the
1934 * given buffer. The bitmap in the buf log format structure indicates
1935 * where to place the logged data.
1936 */
1937/*ARGSUSED*/
1938STATIC void
1939xlog_recover_do_reg_buffer(
1940 xlog_recover_item_t *item,
1941 xfs_buf_t *bp,
1942 xfs_buf_log_format_t *buf_f)
1943{
1944 int i;
1945 int bit;
1946 int nbits;
1947 unsigned int *data_map = NULL;
1948 unsigned int map_size = 0;
1949 int error;
1950
1951 switch (buf_f->blf_type) {
1952 case XFS_LI_BUF:
1953 data_map = buf_f->blf_data_map;
1954 map_size = buf_f->blf_map_size;
1955 break;
1956 }
1957 bit = 0;
1958 i = 1; /* 0 is the buf format structure */
1959 while (1) {
1960 bit = xfs_next_bit(data_map, map_size, bit);
1961 if (bit == -1)
1962 break;
1963 nbits = xfs_contig_bits(data_map, map_size, bit);
1964 ASSERT(nbits > 0);
4b80916b 1965 ASSERT(item->ri_buf[i].i_addr != NULL);
1966 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1967 ASSERT(XFS_BUF_COUNT(bp) >=
1968 ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1969
1970 /*
1971 * Do a sanity check if this is a dquot buffer. Just checking
 1972		 * the first dquot in the buffer should do. XXX This is
1973 * probably a good thing to do for other buf types also.
1974 */
1975 error = 0;
1976 if (buf_f->blf_flags &
1977 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1978 if (item->ri_buf[i].i_addr == NULL) {
1979 cmn_err(CE_ALERT,
1980 "XFS: NULL dquot in %s.", __func__);
1981 goto next;
1982 }
8ec6dba2 1983 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1984 cmn_err(CE_ALERT,
1985 "XFS: dquot too small (%d) in %s.",
1986 item->ri_buf[i].i_len, __func__);
1987 goto next;
1988 }
1989 error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1990 item->ri_buf[i].i_addr,
1991 -1, 0, XFS_QMOPT_DOWARN,
1992 "dquot_buf_recover");
1993 if (error)
1994 goto next;
1da177e4 1995 }
1996
1997 memcpy(xfs_buf_offset(bp,
1998 (uint)bit << XFS_BLI_SHIFT), /* dest */
1999 item->ri_buf[i].i_addr, /* source */
2000 nbits<<XFS_BLI_SHIFT); /* length */
2001 next:
2002 i++;
2003 bit += nbits;
2004 }
2005
2006 /* Shouldn't be any more regions */
2007 ASSERT(i == item->ri_total);
2008}
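/*
 * Editor's sketch (an addition, not original source): the loop above is
 * a bitmap-driven scatter copy.  Each run of set bits in blf_data_map
 * selects one logged region, and region k pairs with ri_buf[k + 1]
 * (slot 0 holds the buf log format structure itself):
 *
 *	i = 1;
 *	while ((bit = xfs_next_bit(data_map, map_size, bit)) != -1) {
 *		nbits = xfs_contig_bits(data_map, map_size, bit);
 *		memcpy(xfs_buf_offset(bp, (uint)bit << XFS_BLI_SHIFT),
 *		       item->ri_buf[i].i_addr, nbits << XFS_BLI_SHIFT);
 *		i++;
 *		bit += nbits;
 *	}
 */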
2009
2010/*
2011 * Do some primitive error checking on ondisk dquot data structures.
2012 */
2013int
2014xfs_qm_dqcheck(
2015 xfs_disk_dquot_t *ddq,
2016 xfs_dqid_t id,
2017 uint type, /* used only when IO_dorepair is true */
2018 uint flags,
2019 char *str)
2020{
2021 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
2022 int errs = 0;
2023
2024 /*
2025 * We can encounter an uninitialized dquot buffer for 2 reasons:
2026 * 1. If we crash while deleting the quotainode(s), and those blks got
2027 * used for user data. This is because we take the path of regular
2028 * file deletion; however, the size field of quotainodes is never
2029 * updated, so all the tricks that we play in itruncate_finish
2030 * don't quite matter.
2031 *
2032 * 2. We don't play the quota buffers when there's a quotaoff logitem.
2033 * But the allocation will be replayed so we'll end up with an
2034 * uninitialized quota block.
2035 *
2036 * This is all fine; things are still consistent, and we haven't lost
2037 * any quota information. Just don't complain about bad dquot blks.
2038 */
1149d96a 2039 if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
2040 if (flags & XFS_QMOPT_DOWARN)
2041 cmn_err(CE_ALERT,
2042 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1149d96a 2043 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2044 errs++;
2045 }
1149d96a 2046 if (ddq->d_version != XFS_DQUOT_VERSION) {
2047 if (flags & XFS_QMOPT_DOWARN)
2048 cmn_err(CE_ALERT,
2049 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1149d96a 2050 str, id, ddq->d_version, XFS_DQUOT_VERSION);
2051 errs++;
2052 }
2053
2054 if (ddq->d_flags != XFS_DQ_USER &&
2055 ddq->d_flags != XFS_DQ_PROJ &&
2056 ddq->d_flags != XFS_DQ_GROUP) {
2057 if (flags & XFS_QMOPT_DOWARN)
2058 cmn_err(CE_ALERT,
2059 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1149d96a 2060 str, id, ddq->d_flags);
2061 errs++;
2062 }
2063
1149d96a 2064 if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2065 if (flags & XFS_QMOPT_DOWARN)
2066 cmn_err(CE_ALERT,
2067 "%s : ondisk-dquot 0x%p, ID mismatch: "
2068 "0x%x expected, found id 0x%x",
1149d96a 2069 str, ddq, id, be32_to_cpu(ddq->d_id));
2070 errs++;
2071 }
2072
2073 if (!errs && ddq->d_id) {
2074 if (ddq->d_blk_softlimit &&
2075 be64_to_cpu(ddq->d_bcount) >=
2076 be64_to_cpu(ddq->d_blk_softlimit)) {
2077 if (!ddq->d_btimer) {
2078 if (flags & XFS_QMOPT_DOWARN)
2079 cmn_err(CE_ALERT,
2080 "%s : Dquot ID 0x%x (0x%p) "
2081 "BLK TIMER NOT STARTED",
1149d96a 2082 str, (int)be32_to_cpu(ddq->d_id), ddq);
2083 errs++;
2084 }
2085 }
2086 if (ddq->d_ino_softlimit &&
2087 be64_to_cpu(ddq->d_icount) >=
2088 be64_to_cpu(ddq->d_ino_softlimit)) {
2089 if (!ddq->d_itimer) {
2090 if (flags & XFS_QMOPT_DOWARN)
2091 cmn_err(CE_ALERT,
2092 "%s : Dquot ID 0x%x (0x%p) "
2093 "INODE TIMER NOT STARTED",
1149d96a 2094 str, (int)be32_to_cpu(ddq->d_id), ddq);
2095 errs++;
2096 }
2097 }
2098 if (ddq->d_rtb_softlimit &&
2099 be64_to_cpu(ddq->d_rtbcount) >=
2100 be64_to_cpu(ddq->d_rtb_softlimit)) {
2101 if (!ddq->d_rtbtimer) {
2102 if (flags & XFS_QMOPT_DOWARN)
2103 cmn_err(CE_ALERT,
2104 "%s : Dquot ID 0x%x (0x%p) "
2105 "RTBLK TIMER NOT STARTED",
1149d96a 2106 str, (int)be32_to_cpu(ddq->d_id), ddq);
2107 errs++;
2108 }
2109 }
2110 }
2111
2112 if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2113 return errs;
2114
2115 if (flags & XFS_QMOPT_DOWARN)
2116 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2117
2118 /*
2119 * Typically, a repair is only requested by quotacheck.
2120 */
2121 ASSERT(id != -1);
2122 ASSERT(flags & XFS_QMOPT_DQREPAIR);
2123 memset(d, 0, sizeof(xfs_dqblk_t));
2124
2125 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2126 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2127 d->dd_diskdq.d_flags = type;
2128 d->dd_diskdq.d_id = cpu_to_be32(id);
2129
2130 return errs;
2131}
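/*
 * Editor's note (an addition, not original source): the three timer
 * checks above are one invariant instantiated for blocks, inodes and
 * realtime blocks -- if a soft limit is set and met or exceeded, the
 * matching timer must already be running.  For blocks:
 *
 *	if (ddq->d_blk_softlimit &&
 *	    be64_to_cpu(ddq->d_bcount) >=
 *			be64_to_cpu(ddq->d_blk_softlimit) &&
 *	    !ddq->d_btimer)
 *		errs++;		(timer should have been started)
 */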
2132
2133/*
2134 * Perform a dquot buffer recovery.
2135 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2136 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2137 * Else, treat it as a regular buffer and do recovery.
2138 */
2139STATIC void
2140xlog_recover_do_dquot_buffer(
2141 xfs_mount_t *mp,
2142 xlog_t *log,
2143 xlog_recover_item_t *item,
2144 xfs_buf_t *bp,
2145 xfs_buf_log_format_t *buf_f)
2146{
2147 uint type;
2148
2149 /*
2150 * Filesystems are required to send in quota flags at mount time.
2151 */
2152 if (mp->m_qflags == 0) {
2153 return;
2154 }
2155
2156 type = 0;
2157 if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2158 type |= XFS_DQ_USER;
2159 if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
2160 type |= XFS_DQ_PROJ;
2161 if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2162 type |= XFS_DQ_GROUP;
2163 /*
2164 * This type of quotas was turned off, so ignore this buffer
2165 */
2166 if (log->l_quotaoffs_flag & type)
2167 return;
2168
053c59a0 2169 xlog_recover_do_reg_buffer(item, bp, buf_f);
2170}
2171
2172/*
2173 * This routine replays a modification made to a buffer at runtime.
 2174 * There are actually two types of buffer, regular and inode, which
 2175 * are handled differently. From inode buffers we only recover a
 2176 * specific set of data, namely
2177 * the inode di_next_unlinked fields. This is because all other inode
2178 * data is actually logged via inode records and any data we replay
2179 * here which overlaps that may be stale.
2180 *
2181 * When meta-data buffers are freed at run time we log a buffer item
2182 * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2183 * of the buffer in the log should not be replayed at recovery time.
2184 * This is so that if the blocks covered by the buffer are reused for
2185 * file data before we crash we don't end up replaying old, freed
2186 * meta-data into a user's file.
2187 *
2188 * To handle the cancellation of buffer log items, we make two passes
2189 * over the log during recovery. During the first we build a table of
2190 * those buffers which have been cancelled, and during the second we
2191 * only replay those buffers which do not have corresponding cancel
2192 * records in the table. See xlog_recover_do_buffer_pass[1,2] above
2193 * for more details on the implementation of the table of cancel records.
2194 */
2195STATIC int
2196xlog_recover_do_buffer_trans(
2197 xlog_t *log,
2198 xlog_recover_item_t *item,
2199 int pass)
2200{
2201 xfs_buf_log_format_t *buf_f;
2202 xfs_mount_t *mp;
2203 xfs_buf_t *bp;
2204 int error;
2205 int cancel;
2206 xfs_daddr_t blkno;
2207 int len;
2208 ushort flags;
2209
2210 buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2211
2212 if (pass == XLOG_RECOVER_PASS1) {
2213 /*
2214 * In this pass we're only looking for buf items
2215 * with the XFS_BLI_CANCEL bit set.
2216 */
2217 xlog_recover_do_buffer_pass1(log, buf_f);
2218 return 0;
2219 } else {
2220 /*
2221 * In this pass we want to recover all the buffers
2222 * which have not been cancelled and are not
2223 * cancellation buffers themselves. The routine
2224 * we call here will tell us whether or not to
2225 * continue with the replay of this buffer.
2226 */
2227 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2228 if (cancel) {
2229 return 0;
2230 }
2231 }
2232 switch (buf_f->blf_type) {
2233 case XFS_LI_BUF:
2234 blkno = buf_f->blf_blkno;
2235 len = buf_f->blf_len;
2236 flags = buf_f->blf_flags;
2237 break;
2238 default:
2239 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2240 "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2241 buf_f->blf_type, log->l_mp->m_logname ?
2242 log->l_mp->m_logname : "internal");
2243 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2244 XFS_ERRLEVEL_LOW, log->l_mp);
2245 return XFS_ERROR(EFSCORRUPTED);
2246 }
2247
2248 mp = log->l_mp;
2249 if (flags & XFS_BLI_INODE_BUF) {
2250 bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
2251 XFS_BUF_LOCK);
2252 } else {
2253 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
2254 }
2255 if (XFS_BUF_ISERROR(bp)) {
2256 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2257 bp, blkno);
2258 error = XFS_BUF_GETERROR(bp);
2259 xfs_buf_relse(bp);
2260 return error;
2261 }
2262
2263 error = 0;
2264 if (flags & XFS_BLI_INODE_BUF) {
2265 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2266 } else if (flags &
2267 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2268 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2269 } else {
053c59a0 2270 xlog_recover_do_reg_buffer(item, bp, buf_f);
2271 }
2272 if (error)
2273 return XFS_ERROR(error);
2274
2275 /*
2276 * Perform delayed write on the buffer. Asynchronous writes will be
2277 * slower when taking into account all the buffers to be flushed.
2278 *
2279 * Also make sure that only inode buffers with good sizes stay in
2280 * the buffer cache. The kernel moves inodes in buffers of 1 block
2281 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2282 * buffers in the log can be a different size if the log was generated
2283 * by an older kernel using unclustered inode buffers or a newer kernel
 2284	 * running with a different inode cluster size. Regardless, if
 2285	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2286 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2287 * the buffer out of the buffer cache so that the buffer won't
2288 * overlap with future reads of those inodes.
2289 */
2290 if (XFS_DINODE_MAGIC ==
b53e675d 2291 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2292 (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2293 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2294 XFS_BUF_STALE(bp);
2295 error = xfs_bwrite(mp, bp);
2296 } else {
2297 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2298 bp->b_mount = mp;
2299 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2300 xfs_bdwrite(mp, bp);
2301 }
2302
2303 return (error);
2304}
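/*
 * Editor's note (an addition, not original source): the write-path
 * decision above reduces to "is this an inode buffer of a size this
 * kernel would never use?".  In outline:
 *
 *	inode_buf = (be16_to_cpu(*(__be16 *)xfs_buf_offset(bp, 0)) ==
 *			XFS_DINODE_MAGIC);
 *	good_size = (XFS_BUF_COUNT(bp) ==
 *			MAX(log->l_mp->m_sb.sb_blocksize,
 *			    (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)));
 *
 * An inode buffer of the wrong size is written out synchronously and
 * marked stale so it never lingers in the cache; everything else goes
 * through the cheaper delayed write path via xfs_bdwrite().
 */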
2305
2306STATIC int
2307xlog_recover_do_inode_trans(
2308 xlog_t *log,
2309 xlog_recover_item_t *item,
2310 int pass)
2311{
2312 xfs_inode_log_format_t *in_f;
2313 xfs_mount_t *mp;
2314 xfs_buf_t *bp;
2315 xfs_dinode_t *dip;
2316 xfs_ino_t ino;
2317 int len;
2318 xfs_caddr_t src;
2319 xfs_caddr_t dest;
2320 int error;
2321 int attr_index;
2322 uint fields;
347d1c01 2323 xfs_icdinode_t *dicp;
6d192a9b 2324 int need_free = 0;
2325
2326 if (pass == XLOG_RECOVER_PASS1) {
2327 return 0;
2328 }
2329
2330 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2331 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2332 } else {
2333 in_f = (xfs_inode_log_format_t *)kmem_alloc(
2334 sizeof(xfs_inode_log_format_t), KM_SLEEP);
2335 need_free = 1;
2336 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2337 if (error)
2338 goto error;
2339 }
2340 ino = in_f->ilf_ino;
2341 mp = log->l_mp;
2342
2343 /*
2344 * Inode buffers can be freed, look out for it,
2345 * and do not replay the inode.
2346 */
2347 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2348 in_f->ilf_len, 0)) {
2349 error = 0;
2350 goto error;
2351 }
1da177e4 2352
2353 bp = xfs_buf_read_flags(mp->m_ddev_targp, in_f->ilf_blkno,
2354 in_f->ilf_len, XFS_BUF_LOCK);
2355 if (XFS_BUF_ISERROR(bp)) {
2356 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
a1941895 2357 bp, in_f->ilf_blkno);
2358 error = XFS_BUF_GETERROR(bp);
2359 xfs_buf_relse(bp);
6d192a9b 2360 goto error;
2361 }
2362 error = 0;
2363 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
a1941895 2364 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2365
2366 /*
2367 * Make sure the place we're flushing out to really looks
2368 * like an inode!
2369 */
81591fe2 2370 if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2371 xfs_buf_relse(bp);
2372 xfs_fs_cmn_err(CE_ALERT, mp,
2373 "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2374 dip, bp, ino);
2375 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2376 XFS_ERRLEVEL_LOW, mp);
2377 error = EFSCORRUPTED;
2378 goto error;
1da177e4 2379 }
347d1c01 2380 dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
2381 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2382 xfs_buf_relse(bp);
2383 xfs_fs_cmn_err(CE_ALERT, mp,
2384 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2385 item, ino);
2386 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2387 XFS_ERRLEVEL_LOW, mp);
2388 error = EFSCORRUPTED;
2389 goto error;
2390 }
2391
2392 /* Skip replay when the on disk inode is newer than the log one */
81591fe2 2393 if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2394 /*
 2395		 * Deal with the wrap case: after di_flushiter wraps,
 2396		 * DI_MAX_FLUSH on disk is older than small in-core values
2397 */
81591fe2 2398 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
347d1c01 2399 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2400 /* do nothing */
2401 } else {
2402 xfs_buf_relse(bp);
2403 error = 0;
2404 goto error;
2405 }
2406 }
2407 /* Take the opportunity to reset the flush iteration count */
2408 dicp->di_flushiter = 0;
2409
2410 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2411 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2412 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2413 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2414 XFS_ERRLEVEL_LOW, mp, dicp);
2415 xfs_buf_relse(bp);
2416 xfs_fs_cmn_err(CE_ALERT, mp,
2417 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2418 item, dip, bp, ino);
2419 error = EFSCORRUPTED;
2420 goto error;
2421 }
2422 } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2423 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2424 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2425 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2426 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2427 XFS_ERRLEVEL_LOW, mp, dicp);
2428 xfs_buf_relse(bp);
2429 xfs_fs_cmn_err(CE_ALERT, mp,
2430 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2431 item, dip, bp, ino);
2432 error = EFSCORRUPTED;
2433 goto error;
2434 }
2435 }
2436 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2437 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2438 XFS_ERRLEVEL_LOW, mp, dicp);
2439 xfs_buf_relse(bp);
2440 xfs_fs_cmn_err(CE_ALERT, mp,
2441 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2442 item, dip, bp, ino,
2443 dicp->di_nextents + dicp->di_anextents,
2444 dicp->di_nblocks);
2445 error = EFSCORRUPTED;
2446 goto error;
2447 }
2448 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2449 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2450 XFS_ERRLEVEL_LOW, mp, dicp);
2451 xfs_buf_relse(bp);
2452 xfs_fs_cmn_err(CE_ALERT, mp,
2453 "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2454 item, dip, bp, ino, dicp->di_forkoff);
2455 error = EFSCORRUPTED;
2456 goto error;
1da177e4 2457 }
81591fe2 2458 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2459 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2460 XFS_ERRLEVEL_LOW, mp, dicp);
2461 xfs_buf_relse(bp);
2462 xfs_fs_cmn_err(CE_ALERT, mp,
2463 "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2464 item->ri_buf[1].i_len, item);
2465 error = EFSCORRUPTED;
2466 goto error;
2467 }
2468
2469 /* The core is in in-core format */
81591fe2 2470 xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
2471
2472 /* the rest is in on-disk format */
2473 if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2474 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2475 item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2476 item->ri_buf[1].i_len - sizeof(struct xfs_icdinode));
2477 }
2478
2479 fields = in_f->ilf_fields;
2480 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2481 case XFS_ILOG_DEV:
81591fe2 2482 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2483 break;
2484 case XFS_ILOG_UUID:
2485 memcpy(XFS_DFORK_DPTR(dip),
2486 &in_f->ilf_u.ilfu_uuid,
2487 sizeof(uuid_t));
2488 break;
2489 }
2490
2491 if (in_f->ilf_size == 2)
2492 goto write_inode_buffer;
2493 len = item->ri_buf[2].i_len;
2494 src = item->ri_buf[2].i_addr;
2495 ASSERT(in_f->ilf_size <= 4);
2496 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2497 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2498 (len == in_f->ilf_dsize));
2499
2500 switch (fields & XFS_ILOG_DFORK) {
2501 case XFS_ILOG_DDATA:
2502 case XFS_ILOG_DEXT:
81591fe2 2503 memcpy(XFS_DFORK_DPTR(dip), src, len);
2504 break;
2505
2506 case XFS_ILOG_DBROOT:
7cc95a82 2507 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
81591fe2 2508 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2509 XFS_DFORK_DSIZE(dip, mp));
2510 break;
2511
2512 default:
2513 /*
2514 * There are no data fork flags set.
2515 */
2516 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2517 break;
2518 }
2519
2520 /*
2521 * If we logged any attribute data, recover it. There may or
2522 * may not have been any other non-core data logged in this
2523 * transaction.
2524 */
2525 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2526 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2527 attr_index = 3;
2528 } else {
2529 attr_index = 2;
2530 }
2531 len = item->ri_buf[attr_index].i_len;
2532 src = item->ri_buf[attr_index].i_addr;
2533 ASSERT(len == in_f->ilf_asize);
2534
2535 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2536 case XFS_ILOG_ADATA:
2537 case XFS_ILOG_AEXT:
2538 dest = XFS_DFORK_APTR(dip);
2539 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2540 memcpy(dest, src, len);
2541 break;
2542
2543 case XFS_ILOG_ABROOT:
2544 dest = XFS_DFORK_APTR(dip);
2545 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2546 len, (xfs_bmdr_block_t*)dest,
2547 XFS_DFORK_ASIZE(dip, mp));
2548 break;
2549
2550 default:
2551 xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2552 ASSERT(0);
2553 xfs_buf_relse(bp);
2554 error = EIO;
2555 goto error;
2556 }
2557 }
2558
2559write_inode_buffer:
2560 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2561 bp->b_mount = mp;
2562 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2563 xfs_bdwrite(mp, bp);
2564error:
2565 if (need_free)
f0e2d93c 2566 kmem_free(in_f);
6d192a9b 2567 return XFS_ERROR(error);
2568}
2569
2570/*
2571 * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
 2572 * structure, so that we know not to do any dquot item or dquot buffer
 2573 * recovery of that type.
2574 */
2575STATIC int
2576xlog_recover_do_quotaoff_trans(
2577 xlog_t *log,
2578 xlog_recover_item_t *item,
2579 int pass)
2580{
2581 xfs_qoff_logformat_t *qoff_f;
2582
2583 if (pass == XLOG_RECOVER_PASS2) {
2584 return (0);
2585 }
2586
2587 qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2588 ASSERT(qoff_f);
2589
2590 /*
2591 * The logitem format's flag tells us if this was user quotaoff,
77a7cce4 2592 * group/project quotaoff or both.
2593 */
2594 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2595 log->l_quotaoffs_flag |= XFS_DQ_USER;
2596 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2597 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2598 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2599 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2600
2601 return (0);
2602}
2603
2604/*
2605 * Recover a dquot record
2606 */
2607STATIC int
2608xlog_recover_do_dquot_trans(
2609 xlog_t *log,
2610 xlog_recover_item_t *item,
2611 int pass)
2612{
2613 xfs_mount_t *mp;
2614 xfs_buf_t *bp;
2615 struct xfs_disk_dquot *ddq, *recddq;
2616 int error;
2617 xfs_dq_logformat_t *dq_f;
2618 uint type;
2619
2620 if (pass == XLOG_RECOVER_PASS1) {
2621 return 0;
2622 }
2623 mp = log->l_mp;
2624
2625 /*
2626 * Filesystems are required to send in quota flags at mount time.
2627 */
2628 if (mp->m_qflags == 0)
2629 return (0);
2630
2631 recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2632
2633 if (item->ri_buf[1].i_addr == NULL) {
2634 cmn_err(CE_ALERT,
2635 "XFS: NULL dquot in %s.", __func__);
2636 return XFS_ERROR(EIO);
2637 }
8ec6dba2 2638 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2639 cmn_err(CE_ALERT,
2640 "XFS: dquot too small (%d) in %s.",
2641 item->ri_buf[1].i_len, __func__);
2642 return XFS_ERROR(EIO);
2643 }
2644
2645 /*
2646 * This type of quotas was turned off, so ignore this record.
2647 */
b53e675d 2648 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2649 ASSERT(type);
2650 if (log->l_quotaoffs_flag & type)
2651 return (0);
2652
2653 /*
2654 * At this point we know that quota was _not_ turned off.
2655 * Since the mount flags are not indicating to us otherwise, this
2656 * must mean that quota is on, and the dquot needs to be replayed.
2657 * Remember that we may not have fully recovered the superblock yet,
2658 * so we can't do the usual trick of looking at the SB quota bits.
2659 *
2660 * The other possibility, of course, is that the quota subsystem was
2661 * removed since the last mount - ENOSYS.
2662 */
2663 dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2664 ASSERT(dq_f);
2665 if ((error = xfs_qm_dqcheck(recddq,
2666 dq_f->qlf_id,
2667 0, XFS_QMOPT_DOWARN,
2668 "xlog_recover_do_dquot_trans (log copy)"))) {
2669 return XFS_ERROR(EIO);
2670 }
2671 ASSERT(dq_f->qlf_len == 1);
2672
2673 error = xfs_read_buf(mp, mp->m_ddev_targp,
2674 dq_f->qlf_blkno,
2675 XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2676 0, &bp);
2677 if (error) {
2678 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2679 bp, dq_f->qlf_blkno);
2680 return error;
2681 }
2682 ASSERT(bp);
2683 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2684
2685 /*
2686 * At least the magic num portion should be on disk because this
2687 * was among a chunk of dquots created earlier, and we did some
2688 * minimal initialization then.
2689 */
2690 if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2691 "xlog_recover_do_dquot_trans")) {
2692 xfs_buf_relse(bp);
2693 return XFS_ERROR(EIO);
2694 }
2695
2696 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2697
2698 ASSERT(dq_f->qlf_size == 2);
2699 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2700 bp->b_mount = mp;
2701 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2702 xfs_bdwrite(mp, bp);
2703
2704 return (0);
2705}
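/*
 * Editor's note (an addition, not original source): observe that the
 * dquot is verified twice above -- xfs_qm_dqcheck() runs once on the
 * logged copy (recddq) and once on the on-disk target (ddq) -- before
 * the single memcpy() that performs the actual replay.  Either failure
 * aborts replay of this record with EIO rather than writing a dquot
 * that cannot be trusted.
 */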
2706
2707/*
2708 * This routine is called to create an in-core extent free intent
2709 * item from the efi format structure which was logged on disk.
2710 * It allocates an in-core efi, copies the extents from the format
2711 * structure into it, and adds the efi to the AIL with the given
2712 * LSN.
2713 */
6d192a9b 2714STATIC int
2715xlog_recover_do_efi_trans(
2716 xlog_t *log,
2717 xlog_recover_item_t *item,
2718 xfs_lsn_t lsn,
2719 int pass)
2720{
6d192a9b 2721 int error;
2722 xfs_mount_t *mp;
2723 xfs_efi_log_item_t *efip;
2724 xfs_efi_log_format_t *efi_formatp;
2725
2726 if (pass == XLOG_RECOVER_PASS1) {
6d192a9b 2727 return 0;
2728 }
2729
2730 efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2731
2732 mp = log->l_mp;
2733 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2734 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2735 &(efip->efi_format)))) {
2736 xfs_efi_item_free(efip);
2737 return error;
2738 }
2739 efip->efi_next_extent = efi_formatp->efi_nextents;
2740 efip->efi_flags |= XFS_EFI_COMMITTED;
2741
a9c21c1b 2742 spin_lock(&log->l_ailp->xa_lock);
1da177e4 2743 /*
783a2f65 2744 * xfs_trans_ail_update() drops the AIL lock.
1da177e4 2745 */
783a2f65 2746 xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
6d192a9b 2747 return 0;
2748}
2749
2750
2751/*
2752 * This routine is called when an efd format structure is found in
 2753 * a committed transaction in the log. Its purpose is to cancel
2754 * the corresponding efi if it was still in the log. To do this
2755 * it searches the AIL for the efi with an id equal to that in the
2756 * efd format structure. If we find it, we remove the efi from the
2757 * AIL and free it.
2758 */
2759STATIC void
2760xlog_recover_do_efd_trans(
2761 xlog_t *log,
2762 xlog_recover_item_t *item,
2763 int pass)
2764{
2765 xfs_efd_log_format_t *efd_formatp;
2766 xfs_efi_log_item_t *efip = NULL;
2767 xfs_log_item_t *lip;
1da177e4 2768 __uint64_t efi_id;
27d8d5fe 2769 struct xfs_ail_cursor cur;
783a2f65 2770 struct xfs_ail *ailp = log->l_ailp;
2771
2772 if (pass == XLOG_RECOVER_PASS1) {
2773 return;
2774 }
2775
2776 efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2777 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2778 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2779 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2780 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2781 efi_id = efd_formatp->efd_efi_id;
2782
2783 /*
2784 * Search for the efi with the id in the efd format structure
2785 * in the AIL.
2786 */
2787 spin_lock(&ailp->xa_lock);
2788 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2789 while (lip != NULL) {
2790 if (lip->li_type == XFS_LI_EFI) {
2791 efip = (xfs_efi_log_item_t *)lip;
2792 if (efip->efi_format.efi_id == efi_id) {
2793 /*
783a2f65 2794 * xfs_trans_ail_delete() drops the
2795 * AIL lock.
2796 */
783a2f65 2797 xfs_trans_ail_delete(ailp, lip);
8ae2c0f6 2798 xfs_efi_item_free(efip);
a9c21c1b 2799 spin_lock(&ailp->xa_lock);
27d8d5fe 2800 break;
2801 }
2802 }
a9c21c1b 2803 lip = xfs_trans_ail_cursor_next(ailp, &cur);
1da177e4 2804 }
2805 xfs_trans_ail_cursor_done(ailp, &cur);
2806 spin_unlock(&ailp->xa_lock);
2807}
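/*
 * Editor's sketch (an addition, not original source): the cancellation
 * above is a linear AIL cursor walk looking for the EFI whose id
 * matches the EFD:
 *
 *	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 *	     lip != NULL;
 *	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
 *		if (lip->li_type == XFS_LI_EFI &&
 *		    ((xfs_efi_log_item_t *)lip)->efi_format.efi_id ==
 *								efi_id)
 *			break;	(found: delete from AIL and free)
 *	}
 */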
2808
2809/*
2810 * Perform the transaction
2811 *
2812 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2813 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2814 */
2815STATIC int
2816xlog_recover_do_trans(
2817 xlog_t *log,
2818 xlog_recover_t *trans,
2819 int pass)
2820{
2821 int error = 0;
2822 xlog_recover_item_t *item, *first_item;
2823
2824 error = xlog_recover_reorder_trans(trans);
2825 if (error)
1da177e4 2826 return error;
ff0205e0 2827
2828 first_item = item = trans->r_itemq;
2829 do {
2830 switch (ITEM_TYPE(item)) {
2831 case XFS_LI_BUF:
2832 error = xlog_recover_do_buffer_trans(log, item, pass);
2833 break;
2834 case XFS_LI_INODE:
2835 error = xlog_recover_do_inode_trans(log, item, pass);
2836 break;
2837 case XFS_LI_EFI:
2838 error = xlog_recover_do_efi_trans(log, item,
2839 trans->r_lsn, pass);
2840 break;
2841 case XFS_LI_EFD:
1da177e4 2842 xlog_recover_do_efd_trans(log, item, pass);
2843 error = 0;
2844 break;
2845 case XFS_LI_DQUOT:
2846 error = xlog_recover_do_dquot_trans(log, item, pass);
2847 break;
2848 case XFS_LI_QUOTAOFF:
2849 error = xlog_recover_do_quotaoff_trans(log, item,
2850 pass);
2851 break;
2852 default:
2853 xlog_warn(
2854 "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2855 ASSERT(0);
2856 error = XFS_ERROR(EIO);
2857 break;
2858 }
2859
2860 if (error)
2861 return error;
2862 item = item->ri_next;
2863 } while (first_item != item);
2864
ff0205e0 2865 return 0;
2866}
2867
2868/*
2869 * Free up any resources allocated by the transaction
2870 *
2871 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2872 */
2873STATIC void
2874xlog_recover_free_trans(
2875 xlog_recover_t *trans)
2876{
2877 xlog_recover_item_t *first_item, *item, *free_item;
2878 int i;
2879
2880 item = first_item = trans->r_itemq;
2881 do {
2882 free_item = item;
2883 item = item->ri_next;
2884 /* Free the regions in the item. */
2885 for (i = 0; i < free_item->ri_cnt; i++) {
f0e2d93c 2886 kmem_free(free_item->ri_buf[i].i_addr);
2887 }
2888 /* Free the item itself */
2889 kmem_free(free_item->ri_buf);
2890 kmem_free(free_item);
2891 } while (first_item != item);
2892 /* Free the transaction recover structure */
f0e2d93c 2893 kmem_free(trans);
2894}
2895
2896STATIC int
2897xlog_recover_commit_trans(
2898 xlog_t *log,
2899 xlog_recover_t **q,
2900 xlog_recover_t *trans,
2901 int pass)
2902{
2903 int error;
2904
2905 if ((error = xlog_recover_unlink_tid(q, trans)))
2906 return error;
2907 if ((error = xlog_recover_do_trans(log, trans, pass)))
2908 return error;
2909 xlog_recover_free_trans(trans); /* no error */
2910 return 0;
2911}
2912
2913STATIC int
2914xlog_recover_unmount_trans(
2915 xlog_recover_t *trans)
2916{
2917 /* Do nothing now */
2918 xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2919 return 0;
2920}
2921
2922/*
2923 * There are two valid states of the r_state field. 0 indicates that the
2924 * transaction structure is in a normal state. We have either seen the
2925 * start of the transaction or the last operation we added was not a partial
2926 * operation. If the last operation we added to the transaction was a
2927 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2928 *
2929 * NOTE: skip LRs with 0 data length.
2930 */
2931STATIC int
2932xlog_recover_process_data(
2933 xlog_t *log,
2934 xlog_recover_t *rhash[],
2935 xlog_rec_header_t *rhead,
2936 xfs_caddr_t dp,
2937 int pass)
2938{
2939 xfs_caddr_t lp;
2940 int num_logops;
2941 xlog_op_header_t *ohead;
2942 xlog_recover_t *trans;
2943 xlog_tid_t tid;
2944 int error;
2945 unsigned long hash;
2946 uint flags;
2947
2948 lp = dp + be32_to_cpu(rhead->h_len);
2949 num_logops = be32_to_cpu(rhead->h_num_logops);
2950
2951 /* check the log format matches our own - else we can't recover */
2952 if (xlog_header_check_recover(log->l_mp, rhead))
2953 return (XFS_ERROR(EIO));
2954
2955 while ((dp < lp) && num_logops) {
2956 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2957 ohead = (xlog_op_header_t *)dp;
2958 dp += sizeof(xlog_op_header_t);
2959 if (ohead->oh_clientid != XFS_TRANSACTION &&
2960 ohead->oh_clientid != XFS_LOG) {
2961 xlog_warn(
2962 "XFS: xlog_recover_process_data: bad clientid");
2963 ASSERT(0);
2964 return (XFS_ERROR(EIO));
2965 }
67fcb7bf 2966 tid = be32_to_cpu(ohead->oh_tid);
2967 hash = XLOG_RHASH(tid);
2968 trans = xlog_recover_find_tid(rhash[hash], tid);
2969 if (trans == NULL) { /* not found; add new tid */
2970 if (ohead->oh_flags & XLOG_START_TRANS)
2971 xlog_recover_new_tid(&rhash[hash], tid,
b53e675d 2972 be64_to_cpu(rhead->h_lsn));
1da177e4 2973 } else {
2974 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2975 xlog_warn(
2976 "XFS: xlog_recover_process_data: bad length");
2977 WARN_ON(1);
2978 return (XFS_ERROR(EIO));
2979 }
2980 flags = ohead->oh_flags & ~XLOG_END_TRANS;
2981 if (flags & XLOG_WAS_CONT_TRANS)
2982 flags &= ~XLOG_CONTINUE_TRANS;
2983 switch (flags) {
2984 case XLOG_COMMIT_TRANS:
2985 error = xlog_recover_commit_trans(log,
2986 &rhash[hash], trans, pass);
2987 break;
2988 case XLOG_UNMOUNT_TRANS:
2989 error = xlog_recover_unmount_trans(trans);
2990 break;
2991 case XLOG_WAS_CONT_TRANS:
2992 error = xlog_recover_add_to_cont_trans(trans,
67fcb7bf 2993 dp, be32_to_cpu(ohead->oh_len));
2994 break;
2995 case XLOG_START_TRANS:
2996 xlog_warn(
2997 "XFS: xlog_recover_process_data: bad transaction");
2998 ASSERT(0);
2999 error = XFS_ERROR(EIO);
3000 break;
3001 case 0:
3002 case XLOG_CONTINUE_TRANS:
3003 error = xlog_recover_add_to_trans(trans,
67fcb7bf 3004 dp, be32_to_cpu(ohead->oh_len));
3005 break;
3006 default:
3007 xlog_warn(
3008 "XFS: xlog_recover_process_data: bad flag");
3009 ASSERT(0);
3010 error = XFS_ERROR(EIO);
3011 break;
3012 }
3013 if (error)
3014 return error;
3015 }
67fcb7bf 3016 dp += be32_to_cpu(ohead->oh_len);
3017 num_logops--;
3018 }
3019 return 0;
3020}
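/*
 * Editor's note (an addition, not original source): the flag handling
 * above normalizes each op header before dispatch.  XLOG_END_TRANS is
 * always masked off, and an op that *was* a continuation is treated as
 * such even if it is also marked as continuing further:
 *
 *	flags = ohead->oh_flags & ~XLOG_END_TRANS;
 *	if (flags & XLOG_WAS_CONT_TRANS)
 *		flags &= ~XLOG_CONTINUE_TRANS;
 *
 * which leaves exactly one of COMMIT, UNMOUNT, WAS_CONT, START, or
 * 0/CONTINUE to switch on.
 */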
3021
3022/*
3023 * Process an extent free intent item that was recovered from
3024 * the log. We need to free the extents that it describes.
3025 */
3c1e2bbe 3026STATIC int
3027xlog_recover_process_efi(
3028 xfs_mount_t *mp,
3029 xfs_efi_log_item_t *efip)
3030{
3031 xfs_efd_log_item_t *efdp;
3032 xfs_trans_t *tp;
3033 int i;
3c1e2bbe 3034 int error = 0;
3035 xfs_extent_t *extp;
3036 xfs_fsblock_t startblock_fsb;
3037
3038 ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3039
3040 /*
3041 * First check the validity of the extents described by the
3042 * EFI. If any are bad, then assume that all are bad and
3043 * just toss the EFI.
3044 */
3045 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3046 extp = &(efip->efi_format.efi_extents[i]);
3047 startblock_fsb = XFS_BB_TO_FSB(mp,
3048 XFS_FSB_TO_DADDR(mp, extp->ext_start));
3049 if ((startblock_fsb == 0) ||
3050 (extp->ext_len == 0) ||
3051 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3052 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3053 /*
3054 * This will pull the EFI from the AIL and
3055 * free the memory associated with it.
3056 */
3057 xfs_efi_release(efip, efip->efi_format.efi_nextents);
3c1e2bbe 3058 return XFS_ERROR(EIO);
3059 }
3060 }
3061
3062 tp = xfs_trans_alloc(mp, 0);
3c1e2bbe 3063 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3064 if (error)
3065 goto abort_error;
3066 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3067
3068 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3069 extp = &(efip->efi_format.efi_extents[i]);
3070 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3071 if (error)
3072 goto abort_error;
3073 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3074 extp->ext_len);
3075 }
3076
3077 efip->efi_flags |= XFS_EFI_RECOVERED;
e5720eec 3078 error = xfs_trans_commit(tp, 0);
3c1e2bbe 3079 return error;
3080
3081abort_error:
3082 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3083 return error;
3084}
3085
3086/*
3087 * When this is called, all of the EFIs which did not have
3088 * corresponding EFDs should be in the AIL. What we do now
3089 * is free the extents associated with each one.
3090 *
3091 * Since we process the EFIs in normal transactions, they
3092 * will be removed at some point after the commit. This prevents
3093 * us from just walking down the list processing each one.
3094 * We'll use a flag in the EFI to skip those that we've already
3095 * processed and use the AIL iteration mechanism's generation
3096 * count to try to speed this up at least a bit.
3097 *
3098 * When we start, we know that the EFIs are the only things in
3099 * the AIL. As we process them, however, other items are added
3100 * to the AIL. Since everything added to the AIL must come after
3101 * everything already in the AIL, we stop processing as soon as
3102 * we see something other than an EFI in the AIL.
3103 */
3c1e2bbe 3104STATIC int
3105xlog_recover_process_efis(
3106 xlog_t *log)
3107{
3108 xfs_log_item_t *lip;
3109 xfs_efi_log_item_t *efip;
3c1e2bbe 3110 int error = 0;
27d8d5fe 3111 struct xfs_ail_cursor cur;
a9c21c1b 3112 struct xfs_ail *ailp;
1da177e4 3113
3114 ailp = log->l_ailp;
3115 spin_lock(&ailp->xa_lock);
3116 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3117 while (lip != NULL) {
3118 /*
3119 * We're done when we see something other than an EFI.
27d8d5fe 3120 * There should be no EFIs left in the AIL now.
3121 */
3122 if (lip->li_type != XFS_LI_EFI) {
27d8d5fe 3123#ifdef DEBUG
a9c21c1b 3124 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3125 ASSERT(lip->li_type != XFS_LI_EFI);
3126#endif
3127 break;
3128 }
3129
3130 /*
3131 * Skip EFIs that we've already processed.
3132 */
3133 efip = (xfs_efi_log_item_t *)lip;
3134 if (efip->efi_flags & XFS_EFI_RECOVERED) {
a9c21c1b 3135 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3136 continue;
3137 }
3138
3139 spin_unlock(&ailp->xa_lock);
3140 error = xlog_recover_process_efi(log->l_mp, efip);
3141 spin_lock(&ailp->xa_lock);
3142 if (error)
3143 goto out;
a9c21c1b 3144 lip = xfs_trans_ail_cursor_next(ailp, &cur);
1da177e4 3145 }
27d8d5fe 3146out:
3147 xfs_trans_ail_cursor_done(ailp, &cur);
3148 spin_unlock(&ailp->xa_lock);
3c1e2bbe 3149 return error;
3150}
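/*
 * Editor's note (an addition, not original source): the early break in
 * the walk above leans on AIL ordering -- items queued while an EFI is
 * being replayed always sort after the recovered EFIs, so the first
 * non-EFI item encountered means no recovered EFIs remain beyond the
 * cursor.
 */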
3151
3152/*
3153 * This routine performs a transaction to null out a bad inode pointer
3154 * in an agi unlinked inode hash bucket.
3155 */
3156STATIC void
3157xlog_recover_clear_agi_bucket(
3158 xfs_mount_t *mp,
3159 xfs_agnumber_t agno,
3160 int bucket)
3161{
3162 xfs_trans_t *tp;
3163 xfs_agi_t *agi;
3164 xfs_buf_t *agibp;
3165 int offset;
3166 int error;
3167
3168 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3169 error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3170 0, 0, 0);
3171 if (error)
3172 goto out_abort;
1da177e4 3173
3174 error = xfs_read_agi(mp, tp, agno, &agibp);
3175 if (error)
e5720eec 3176 goto out_abort;
1da177e4 3177
5e1be0fb 3178 agi = XFS_BUF_TO_AGI(agibp);
16259e7d 3179 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3180 offset = offsetof(xfs_agi_t, agi_unlinked) +
3181 (sizeof(xfs_agino_t) * bucket);
3182 xfs_trans_log_buf(tp, agibp, offset,
3183 (offset + sizeof(xfs_agino_t) - 1));
3184
3185 error = xfs_trans_commit(tp, 0);
3186 if (error)
3187 goto out_error;
3188 return;
3189
3190out_abort:
3191 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3192out_error:
3193 xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3194 "failed to clear agi %d. Continuing.", agno);
3195 return;
3196}
3197
3198STATIC xfs_agino_t
3199xlog_recover_process_one_iunlink(
3200 struct xfs_mount *mp,
3201 xfs_agnumber_t agno,
3202 xfs_agino_t agino,
3203 int bucket)
3204{
3205 struct xfs_buf *ibp;
3206 struct xfs_dinode *dip;
3207 struct xfs_inode *ip;
3208 xfs_ino_t ino;
3209 int error;
3210
3211 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3212 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3213 if (error)
3214 goto fail;
3215
3216 /*
3217 * Get the on disk inode to find the next inode in the bucket.
3218 */
76d8b277 3219 error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
23fac50f 3220 if (error)
0e446673 3221 goto fail_iput;
23fac50f 3222
23fac50f 3223 ASSERT(ip->i_d.di_nlink == 0);
0e446673 3224 ASSERT(ip->i_d.di_mode != 0);
3225
3226 /* setup for the next pass */
3227 agino = be32_to_cpu(dip->di_next_unlinked);
3228 xfs_buf_relse(ibp);
3229
3230 /*
3231 * Prevent any DMAPI event from being sent when the reference on
3232 * the inode is dropped.
3233 */
3234 ip->i_d.di_dmevmask = 0;
3235
0e446673 3236 IRELE(ip);
3237 return agino;
3238
3239 fail_iput:
3240 IRELE(ip);
3241 fail:
3242 /*
3243 * We can't read in the inode this bucket points to, or this inode
3244 * is messed up. Just ditch this bucket of inodes. We will lose
3245 * some inodes and space, but at least we won't hang.
3246 *
3247 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3248 * clear the inode pointer in the bucket.
3249 */
3250 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3251 return NULLAGINO;
3252}
3253
3254/*
3255 * xlog_iunlink_recover
3256 *
3257 * This is called during recovery to process any inodes which
 3258 * we unlinked but did not free when the system crashed. These
3259 * inodes will be on the lists in the AGI blocks. What we do
3260 * here is scan all the AGIs and fully truncate and free any
3261 * inodes found on the lists. Each inode is removed from the
3262 * lists when it has been fully truncated and is freed. The
3263 * freeing of the inode and its removal from the list must be
3264 * atomic.
3265 */
d96f8f89 3266STATIC void
3267xlog_recover_process_iunlinks(
3268 xlog_t *log)
3269{
3270 xfs_mount_t *mp;
3271 xfs_agnumber_t agno;
3272 xfs_agi_t *agi;
3273 xfs_buf_t *agibp;
1da177e4 3274 xfs_agino_t agino;
3275 int bucket;
3276 int error;
3277 uint mp_dmevmask;
3278
3279 mp = log->l_mp;
3280
3281 /*
3282 * Prevent any DMAPI event from being sent while in this function.
3283 */
3284 mp_dmevmask = mp->m_dmevmask;
3285 mp->m_dmevmask = 0;
3286
3287 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3288 /*
3289 * Find the agi for this ag.
3290 */
3291 error = xfs_read_agi(mp, NULL, agno, &agibp);
3292 if (error) {
3293 /*
3294 * AGI is b0rked. Don't process it.
3295 *
3296 * We should probably mark the filesystem as corrupt
3297 * after we've recovered all the ag's we can....
3298 */
3299 continue;
3300 }
3301 agi = XFS_BUF_TO_AGI(agibp);
3302
3303 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
16259e7d 3304 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
1da177e4 3305 while (agino != NULLAGINO) {
3306 /*
3307 * Release the agi buffer so that it can
3308 * be acquired in the normal course of the
3309 * transaction to truncate and free the inode.
3310 */
3311 xfs_buf_relse(agibp);
3312
3313 agino = xlog_recover_process_one_iunlink(mp,
3314 agno, agino, bucket);
3315
3316 /*
 3317			 * Reacquire the agi buffer and continue around
3318 * the loop. This should never fail as we know
3319 * the buffer was good earlier on.
1da177e4 3320 */
3321 error = xfs_read_agi(mp, NULL, agno, &agibp);
3322 ASSERT(error == 0);
1da177e4 3323 agi = XFS_BUF_TO_AGI(agibp);
3324 }
3325 }
3326
3327 /*
3328 * Release the buffer for the current agi so we can
3329 * go on to the next one.
3330 */
3331 xfs_buf_relse(agibp);
3332 }
3333
3334 mp->m_dmevmask = mp_dmevmask;
3335}
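/*
 * Editor's sketch (an addition, not original source): per AG, the scan
 * above is a walk of the XFS_AGI_UNLINKED_BUCKETS singly linked lists
 * rooted in the AGI:
 *
 *	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
 *		agino = be32_to_cpu(agi->agi_unlinked[bucket]);
 *		while (agino != NULLAGINO)
 *			agino = xlog_recover_process_one_iunlink(mp,
 *					agno, agino, bucket);
 *	}
 *
 * with each step freeing one inode and returning its di_next_unlinked,
 * and the real loop additionally dropping and reacquiring the AGI
 * buffer around every iteration.
 */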
3336
3337
3338#ifdef DEBUG
3339STATIC void
3340xlog_pack_data_checksum(
3341 xlog_t *log,
3342 xlog_in_core_t *iclog,
3343 int size)
3344{
3345 int i;
b53e675d 3346 __be32 *up;
3347 uint chksum = 0;
3348
b53e675d 3349 up = (__be32 *)iclog->ic_datap;
3350 /* divide length by 4 to get # words */
3351 for (i = 0; i < (size >> 2); i++) {
b53e675d 3352 chksum ^= be32_to_cpu(*up);
3353 up++;
3354 }
b53e675d 3355 iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3356}
3357#else
3358#define xlog_pack_data_checksum(log, iclog, size)
3359#endif
3360
3361/*
3362 * Stamp cycle number in every block
3363 */
3364void
3365xlog_pack_data(
3366 xlog_t *log,
3367 xlog_in_core_t *iclog,
3368 int roundoff)
3369{
3370 int i, j, k;
3371 int size = iclog->ic_offset + roundoff;
b53e675d 3372 __be32 cycle_lsn;
1da177e4 3373 xfs_caddr_t dp;
3374
3375 xlog_pack_data_checksum(log, iclog, size);
3376
3377 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3378
3379 dp = iclog->ic_datap;
3380 for (i = 0; i < BTOBB(size) &&
3381 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3382 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3383 *(__be32 *)dp = cycle_lsn;
3384 dp += BBSIZE;
3385 }
3386
62118709 3387 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3388 xlog_in_core_2_t *xhdr = iclog->ic_data;
3389
3390 for ( ; i < BTOBB(size); i++) {
3391 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3392 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3393 xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3394 *(__be32 *)dp = cycle_lsn;
3395 dp += BBSIZE;
3396 }
3397
3398 for (i = 1; i < log->l_iclog_heads; i++) {
3399 xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3400 }
3401 }
3402}
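/*
 * Editor's note (an addition, not original source): the index math above
 * splits block i of the record between the main header and the extended
 * headers, with XLOG_HEADER_CYCLE_SIZE / BBSIZE slots per header:
 *
 *	j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);	(which header)
 *	k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);	(slot within it)
 *
 * Blocks that fit in the main header are saved in h_cycle_data[]; later
 * blocks land in xhdr[j].hic_xheader.xh_cycle_data[k].  Each slot keeps
 * the __be32 that the cycle LSN stamp overwrites, so recovery can
 * restore it in xlog_unpack_data().
 */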
3403
3404#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3405STATIC void
3406xlog_unpack_data_checksum(
3407 xlog_rec_header_t *rhead,
3408 xfs_caddr_t dp,
3409 xlog_t *log)
3410{
b53e675d 3411 __be32 *up = (__be32 *)dp;
3412 uint chksum = 0;
3413 int i;
3414
3415 /* divide length by 4 to get # words */
3416 for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
3417 chksum ^= be32_to_cpu(*up);
3418 up++;
3419 }
b53e675d 3420 if (chksum != be32_to_cpu(rhead->h_chksum)) {
3421 if (rhead->h_chksum ||
3422 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3423 cmn_err(CE_DEBUG,
b6574520 3424 "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
b53e675d 3425 be32_to_cpu(rhead->h_chksum), chksum);
3426 cmn_err(CE_DEBUG,
3427"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
62118709 3428 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1da177e4 3429 cmn_err(CE_DEBUG,
b6574520 3430 "XFS: LogR this is a LogV2 filesystem\n");
3431 }
3432 log->l_flags |= XLOG_CHKSUM_MISMATCH;
3433 }
3434 }
3435}
3436#else
3437#define xlog_unpack_data_checksum(rhead, dp, log)
3438#endif
3439
3440STATIC void
3441xlog_unpack_data(
3442 xlog_rec_header_t *rhead,
3443 xfs_caddr_t dp,
3444 xlog_t *log)
3445{
3446 int i, j, k;
1da177e4 3447
b53e675d 3448 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
1da177e4 3449 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
b53e675d 3450 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3451 dp += BBSIZE;
3452 }
3453
62118709 3454 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
b28708d6 3455 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
b53e675d 3456 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3457 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3458 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
b53e675d 3459 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3460 dp += BBSIZE;
3461 }
3462 }
3463
3464 xlog_unpack_data_checksum(rhead, dp, log);
3465}
3466
3467STATIC int
3468xlog_valid_rec_header(
3469 xlog_t *log,
3470 xlog_rec_header_t *rhead,
3471 xfs_daddr_t blkno)
3472{
3473 int hlen;
3474
b53e675d 3475 if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3476 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3477 XFS_ERRLEVEL_LOW, log->l_mp);
3478 return XFS_ERROR(EFSCORRUPTED);
3479 }
3480 if (unlikely(
3481 (!rhead->h_version ||
b53e675d 3482 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
1da177e4 3483 xlog_warn("XFS: %s: unrecognised log version (%d).",
34a622b2 3484 __func__, be32_to_cpu(rhead->h_version));
3485 return XFS_ERROR(EIO);
3486 }
3487
3488 /* LR body must have data or it wouldn't have been written */
b53e675d 3489 hlen = be32_to_cpu(rhead->h_len);
3490 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3491 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3492 XFS_ERRLEVEL_LOW, log->l_mp);
3493 return XFS_ERROR(EFSCORRUPTED);
3494 }
3495 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3496 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3497 XFS_ERRLEVEL_LOW, log->l_mp);
3498 return XFS_ERROR(EFSCORRUPTED);
3499 }
3500 return 0;
3501}
3502
3503/*
3504 * Read the log from tail to head and process the log records found.
3505 * Handle the two cases where the tail and head are in the same cycle
3506 * and where the active portion of the log wraps around the end of
3507 * the physical log separately. The pass parameter is passed through
3508 * to the routines called to process the data and is not looked at
3509 * here.
3510 */
3511STATIC int
3512xlog_do_recovery_pass(
3513 xlog_t *log,
3514 xfs_daddr_t head_blk,
3515 xfs_daddr_t tail_blk,
3516 int pass)
3517{
3518 xlog_rec_header_t *rhead;
3519 xfs_daddr_t blk_no;
fc5bc4c8 3520 xfs_caddr_t offset;
3521 xfs_buf_t *hbp, *dbp;
3522 int error = 0, h_size;
3523 int bblks, split_bblks;
3524 int hblks, split_hblks, wrapped_hblks;
3525 xlog_recover_t *rhash[XLOG_RHASH_SIZE];
3526
3527 ASSERT(head_blk != tail_blk);
3528
3529 /*
3530 * Read the header of the tail block and get the iclog buffer size from
3531 * h_size. Use this to tell how many sectors make up the log header.
3532 */
62118709 3533 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3534 /*
3535 * When using variable length iclogs, read first sector of
3536 * iclog header and extract the header size from it. Get a
3537 * new hbp that is the correct size.
3538 */
3539 hbp = xlog_get_bp(log, 1);
3540 if (!hbp)
3541 return ENOMEM;
3542
3543 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3544 if (error)
1da177e4 3545 goto bread_err1;
076e6acb 3546
3547 rhead = (xlog_rec_header_t *)offset;
3548 error = xlog_valid_rec_header(log, rhead, tail_blk);
3549 if (error)
3550 goto bread_err1;
3551 h_size = be32_to_cpu(rhead->h_size);
3552 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3553 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3554 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3555 if (h_size % XLOG_HEADER_CYCLE_SIZE)
3556 hblks++;
3557 xlog_put_bp(hbp);
3558 hbp = xlog_get_bp(log, hblks);
3559 } else {
3560 hblks = 1;
3561 }
3562 } else {
3563 ASSERT(log->l_sectbb_log == 0);
3564 hblks = 1;
3565 hbp = xlog_get_bp(log, 1);
3566 h_size = XLOG_BIG_RECORD_BSIZE;
3567 }
3568
3569 if (!hbp)
3570 return ENOMEM;
3571 dbp = xlog_get_bp(log, BTOBB(h_size));
3572 if (!dbp) {
3573 xlog_put_bp(hbp);
3574 return ENOMEM;
3575 }
3576
3577 memset(rhash, 0, sizeof(rhash));
3578 if (tail_blk <= head_blk) {
3579 for (blk_no = tail_blk; blk_no < head_blk; ) {
3580 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3581 if (error)
1da177e4 3582 goto bread_err2;
076e6acb 3583
3584 rhead = (xlog_rec_header_t *)offset;
3585 error = xlog_valid_rec_header(log, rhead, blk_no);
3586 if (error)
3587 goto bread_err2;
3588
3589 /* blocks in data section */
b53e675d 3590 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3591 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3592 &offset);
3593 if (error)
3594 goto bread_err2;
076e6acb 3595
3596 xlog_unpack_data(rhead, offset, log);
3597 if ((error = xlog_recover_process_data(log,
3598 rhash, rhead, offset, pass)))
3599 goto bread_err2;
3600 blk_no += bblks + hblks;
3601 }
3602 } else {
3603 /*
3604 * Perform recovery around the end of the physical log.
3605 * When the head is not on the same cycle number as the tail,
3606 * we can't do a sequential recovery as above.
3607 */
3608 blk_no = tail_blk;
3609 while (blk_no < log->l_logBBsize) {
3610 /*
3611 * Check for header wrapping around physical end-of-log
3612 */
fc5bc4c8 3613 offset = XFS_BUF_PTR(hbp);
3614 split_hblks = 0;
3615 wrapped_hblks = 0;
3616 if (blk_no + hblks <= log->l_logBBsize) {
3617 /* Read header in one read */
3618 error = xlog_bread(log, blk_no, hblks, hbp,
3619 &offset);
3620 if (error)
3621 goto bread_err2;
3622 } else {
3623 /* This LR is split across physical log end */
3624 if (blk_no != log->l_logBBsize) {
3625 /* some data before physical log end */
3626 ASSERT(blk_no <= INT_MAX);
3627 split_hblks = log->l_logBBsize - (int)blk_no;
3628 ASSERT(split_hblks > 0);
3629 error = xlog_bread(log, blk_no,
3630 split_hblks, hbp,
3631 &offset);
3632 if (error)
1da177e4 3633 goto bread_err2;
1da177e4 3634 }
076e6acb 3635
3636 /*
3637 * Note: this black magic still works with
3638 * large sector sizes (non-512) only because:
3639 * - we increased the buffer size originally
3640 * by 1 sector giving us enough extra space
3641 * for the second read;
3642 * - the log start is guaranteed to be sector
3643 * aligned;
3644 * - we read the log end (LR header start)
3645 * _first_, then the log start (LR header end)
3646 * - order is important.
3647 */
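				/*
				 * Worked example: with a 1000 block
				 * physical log, hblks = 4 and
				 * blk_no = 998, split_hblks = 2 and
				 * wrapped_hblks = 2.  Blocks 998-999
				 * were just read into the start of
				 * hbp above; blocks 0-1 are read
				 * below into the same buffer at an
				 * offset of BBTOB(2) bytes.
				 */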
234f56ac 3648 wrapped_hblks = hblks - split_hblks;
234f56ac 3649 error = XFS_BUF_SET_PTR(hbp,
fc5bc4c8 3650 offset + BBTOB(split_hblks),
1da177e4 3651 BBTOB(hblks - split_hblks));
3652 if (error)
3653 goto bread_err2;
3654
3655 error = xlog_bread_noalign(log, 0,
3656 wrapped_hblks, hbp);
3657 if (error)
3658 goto bread_err2;
3659
fc5bc4c8 3660 error = XFS_BUF_SET_PTR(hbp, offset,
234f56ac 3661 BBTOB(hblks));
3662 if (error)
3663 goto bread_err2;
3664 }
3665 rhead = (xlog_rec_header_t *)offset;
3666 error = xlog_valid_rec_header(log, rhead,
3667 split_hblks ? blk_no : 0);
3668 if (error)
3669 goto bread_err2;
3670
b53e675d 3671 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3672 blk_no += hblks;
3673
3674 /* Read in data for log record */
3675 if (blk_no + bblks <= log->l_logBBsize) {
3676 error = xlog_bread(log, blk_no, bblks, dbp,
3677 &offset);
3678 if (error)
3679 goto bread_err2;
3680 } else {
3681 /* This log record is split across the
3682 * physical end of log */
fc5bc4c8 3683 offset = XFS_BUF_PTR(dbp);
3684 split_bblks = 0;
3685 if (blk_no != log->l_logBBsize) {
3686 /* some data is before the physical
3687 * end of log */
3688 ASSERT(!wrapped_hblks);
3689 ASSERT(blk_no <= INT_MAX);
3690 split_bblks =
3691 log->l_logBBsize - (int)blk_no;
3692 ASSERT(split_bblks > 0);
3693 error = xlog_bread(log, blk_no,
3694 split_bblks, dbp,
3695 &offset);
3696 if (error)
1da177e4 3697 goto bread_err2;
1da177e4 3698 }
076e6acb 3699
3700 /*
3701 * Note: this black magic still works with
3702 * large sector sizes (non-512) only because:
3703 * - we increased the buffer size originally
3704 * by 1 sector giving us enough extra space
3705 * for the second read;
3706 * - the log start is guaranteed to be sector
3707 * aligned;
3708 * - we read the log end (LR header start)
3709 * _first_, then the log start (LR header end)
3710 * - order is important.
3711 */
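				/*
				 * Same split-read trick for the data:
				 * the blocks before the physical end
				 * were read into the start of dbp, and
				 * the remaining bblks - split_bblks
				 * blocks are read below starting at
				 * log block wrapped_hblks, i.e. just
				 * past any wrapped portion of the LR
				 * header.
				 */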
234f56ac 3712 error = XFS_BUF_SET_PTR(dbp,
fc5bc4c8 3713 offset + BBTOB(split_bblks),
1da177e4 3714 BBTOB(bblks - split_bblks));
234f56ac 3715 if (error)
1da177e4 3716 goto bread_err2;
3717
3718 error = xlog_bread_noalign(log, wrapped_hblks,
3719 bblks - split_bblks,
3720 dbp);
3721 if (error)
3722 goto bread_err2;
3723
fc5bc4c8 3724 error = XFS_BUF_SET_PTR(dbp, offset, h_size);
3725 if (error)
3726 goto bread_err2;
3727 }
3728 xlog_unpack_data(rhead, offset, log);
3729 if ((error = xlog_recover_process_data(log, rhash,
3730 rhead, offset, pass)))
3731 goto bread_err2;
3732 blk_no += bblks;
3733 }
3734
3735 ASSERT(blk_no >= log->l_logBBsize);
3736 blk_no -= log->l_logBBsize;
3737
3738 /* read first part of physical log */
3739 while (blk_no < head_blk) {
3740 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3741 if (error)
1da177e4 3742 goto bread_err2;
076e6acb 3743
3744 rhead = (xlog_rec_header_t *)offset;
3745 error = xlog_valid_rec_header(log, rhead, blk_no);
3746 if (error)
3747 goto bread_err2;
076e6acb 3748
b53e675d 3749 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3750 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3751 &offset);
3752 if (error)
1da177e4 3753 goto bread_err2;
076e6acb 3754
3755 xlog_unpack_data(rhead, offset, log);
3756 if ((error = xlog_recover_process_data(log, rhash,
3757 rhead, offset, pass)))
3758 goto bread_err2;
3759 blk_no += bblks + hblks;
3760 }
3761 }
3762
3763 bread_err2:
3764 xlog_put_bp(dbp);
3765 bread_err1:
3766 xlog_put_bp(hbp);
3767 return error;
3768}
3769
3770/*
 3771 * Do the recovery of the log.  We actually do this in two passes.
3772 * The two passes are necessary in order to implement the function
3773 * of cancelling a record written into the log. The first pass
3774 * determines those things which have been cancelled, and the
3775 * second pass replays log items normally except for those which
3776 * have been cancelled. The handling of the replay and cancellations
3777 * takes place in the log item type specific routines.
3778 *
3779 * The table of items which have cancel records in the log is allocated
3780 * and freed at this level, since only here do we know when all of
3781 * the log recovery has been completed.
3782 */
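/*
 * Roughly: pass 1 (XLOG_RECOVER_PASS1) notes each buffer whose changes
 * were cancelled in the log and records its block number and length in
 * l_buf_cancel_table; pass 2 (XLOG_RECOVER_PASS2) replays everything
 * else normally but skips buffers that match an entry in that table.
 */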
3783STATIC int
3784xlog_do_log_recovery(
3785 xlog_t *log,
3786 xfs_daddr_t head_blk,
3787 xfs_daddr_t tail_blk)
3788{
3789 int error;
3790
3791 ASSERT(head_blk != tail_blk);
3792
3793 /*
3794 * First do a pass to find all of the cancelled buf log items.
3795 * Store them in the buf_cancel_table for use in the second pass.
3796 */
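	/*
	 * The cancel table is a small hash: XLOG_BC_TABLE_SIZE bucket
	 * heads, each chaining xfs_buf_cancel_t entries keyed by the
	 * cancelled buffer's block number.
	 */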
3797 log->l_buf_cancel_table =
3798 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3799 sizeof(xfs_buf_cancel_t*),
3800 KM_SLEEP);
3801 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3802 XLOG_RECOVER_PASS1);
3803 if (error != 0) {
f0e2d93c 3804 kmem_free(log->l_buf_cancel_table);
3805 log->l_buf_cancel_table = NULL;
3806 return error;
3807 }
3808 /*
3809 * Then do a second pass to actually recover the items in the log.
3810 * When it is complete free the table of buf cancel items.
3811 */
3812 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3813 XLOG_RECOVER_PASS2);
3814#ifdef DEBUG
6d192a9b 3815 if (!error) {
3816 int i;
3817
3818 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3819 ASSERT(log->l_buf_cancel_table[i] == NULL);
3820 }
3821#endif /* DEBUG */
3822
f0e2d93c 3823 kmem_free(log->l_buf_cancel_table);
3824 log->l_buf_cancel_table = NULL;
3825
3826 return error;
3827}
3828
3829/*
3830 * Do the actual recovery
3831 */
3832STATIC int
3833xlog_do_recover(
3834 xlog_t *log,
3835 xfs_daddr_t head_blk,
3836 xfs_daddr_t tail_blk)
3837{
3838 int error;
3839 xfs_buf_t *bp;
3840 xfs_sb_t *sbp;
3841
3842 /*
3843 * First replay the images in the log.
3844 */
3845 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3846 if (error) {
3847 return error;
3848 }
3849
3850 XFS_bflush(log->l_mp->m_ddev_targp);
3851
3852 /*
3853 * If IO errors happened during recovery, bail out.
3854 */
3855 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3856 return (EIO);
3857 }
3858
3859 /*
3860 * We now update the tail_lsn since much of the recovery has completed
3861 * and there may be space available to use. If there were no extent
 3862 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3863 * be the last_sync_lsn. This was set in xlog_find_tail to be the
3864 * lsn of the last known good LR on disk. If there are extent frees
3865 * or iunlinks they will have some entries in the AIL; so we look at
3866 * the AIL to determine how to set the tail_lsn.
3867 */
3868 xlog_assign_tail_lsn(log->l_mp);
3869
3870 /*
3871 * Now that we've finished replaying all buffer and inode
3872 * updates, re-read in the superblock.
3873 */
3874 bp = xfs_getsb(log->l_mp, 0);
3875 XFS_BUF_UNDONE(bp);
3876 ASSERT(!(XFS_BUF_ISWRITE(bp)));
3877 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
1da177e4 3878 XFS_BUF_READ(bp);
bebf963f 3879 XFS_BUF_UNASYNC(bp);
1da177e4 3880 xfsbdstrat(log->l_mp, bp);
3881 error = xfs_iowait(bp);
3882 if (error) {
3883 xfs_ioerror_alert("xlog_do_recover",
3884 log->l_mp, bp, XFS_BUF_ADDR(bp));
3885 ASSERT(0);
3886 xfs_buf_relse(bp);
3887 return error;
3888 }
3889
3890 /* Convert superblock from on-disk format */
3891 sbp = &log->l_mp->m_sb;
2bdf7cd0 3892 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
1da177e4 3893 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
62118709 3894 ASSERT(xfs_sb_good_version(sbp));
3895 xfs_buf_relse(bp);
3896
3897 /* We've re-read the superblock so re-initialize per-cpu counters */
3898 xfs_icsb_reinit_counters(log->l_mp);
3899
3900 xlog_recover_check_summary(log);
3901
3902 /* Normal transactions can now occur */
3903 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3904 return 0;
3905}
3906
3907/*
3908 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3909 *
3910 * Return error or zero.
3911 */
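/*
 * Note that xlog_find_tail() returning head_blk == tail_blk indicates a
 * clean log with nothing to replay, so the recovery work below only
 * runs when the two differ.
 */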
3912int
3913xlog_recover(
65be6054 3914 xlog_t *log)
3915{
3916 xfs_daddr_t head_blk, tail_blk;
3917 int error;
3918
3919 /* find the tail of the log */
65be6054 3920 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3921 return error;
3922
3923 if (tail_blk != head_blk) {
3924 /* There used to be a comment here:
3925 *
3926 * disallow recovery on read-only mounts. note -- mount
3927 * checks for ENOSPC and turns it into an intelligent
3928 * error message.
3929 * ...but this is no longer true. Now, unless you specify
3930 * NORECOVERY (in which case this function would never be
3931 * called), we just go ahead and recover. We do this all
3932 * under the vfs layer, so we can get away with it unless
3933 * the device itself is read-only, in which case we fail.
3934 */
3a02ee18 3935 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3936 return error;
3937 }
3938
3939 cmn_err(CE_NOTE,
3940 "Starting XFS recovery on filesystem: %s (logdev: %s)",
3941 log->l_mp->m_fsname, log->l_mp->m_logname ?
3942 log->l_mp->m_logname : "internal");
3943
3944 error = xlog_do_recover(log, head_blk, tail_blk);
3945 log->l_flags |= XLOG_RECOVERY_NEEDED;
3946 }
3947 return error;
3948}
3949
3950/*
3951 * In the first part of recovery we replay inodes and buffers and build
3952 * up the list of extent free items which need to be processed. Here
3953 * we process the extent free items and clean up the on disk unlinked
3954 * inode lists. This is separated from the first part of recovery so
3955 * that the root and real-time bitmap inodes can be read in from disk in
3956 * between the two stages. This is necessary so that we can free space
3957 * in the real-time portion of the file system.
3958 */
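/*
 * In the normal mount path this is typically called some time after
 * xlog_recover(), once the root and real-time bitmap inodes have been
 * read in; XLOG_RECOVERY_NEEDED carries the state between the two calls.
 */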
3959int
3960xlog_recover_finish(
4249023a 3961 xlog_t *log)
3962{
3963 /*
3964 * Now we're ready to do the transactions needed for the
3965 * rest of recovery. Start with completing all the extent
3966 * free intent records and then process the unlinked inode
3967 * lists. At this point, we essentially run in normal mode
3968 * except that we're still performing recovery actions
3969 * rather than accepting new requests.
3970 */
3971 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3972 int error;
3973 error = xlog_recover_process_efis(log);
3974 if (error) {
3975 cmn_err(CE_ALERT,
3976 "Failed to recover EFIs on filesystem: %s",
3977 log->l_mp->m_fsname);
3978 return error;
3979 }
3980 /*
3981 * Sync the log to get all the EFIs out of the AIL.
3982 * This isn't absolutely necessary, but it helps in
3983 * case the unlink transactions would have problems
3984 * pushing the EFIs out of the way.
3985 */
3986 xfs_log_force(log->l_mp, (xfs_lsn_t)0,
3987 (XFS_LOG_FORCE | XFS_LOG_SYNC));
3988
4249023a 3989 xlog_recover_process_iunlinks(log);
3990
3991 xlog_recover_check_summary(log);
3992
3993 cmn_err(CE_NOTE,
3994 "Ending XFS recovery on filesystem: %s (logdev: %s)",
3995 log->l_mp->m_fsname, log->l_mp->m_logname ?
3996 log->l_mp->m_logname : "internal");
3997 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3998 } else {
3999 cmn_err(CE_DEBUG,
b6574520 4000 "!Ending clean XFS mount for filesystem: %s\n",
4001 log->l_mp->m_fsname);
4002 }
4003 return 0;
4004}
4005
4006
4007#if defined(DEBUG)
4008/*
4009 * Read all of the agf and agi counters and check that they
4010 * are consistent with the superblock counters.
4011 */
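/*
 * Concretely, the sums accumulated below correspond to the (currently
 * disabled) assertions:
 *	sum(agf_freeblks + agf_flcount) == sb_fdblocks
 *	sum(agi_count)                  == sb_icount
 *	sum(agi_freecount)              == sb_ifree
 * They stay under XFS_LOUD_RECOVERY and #if 0 until allocation btree
 * blocks living in free space are accounted for.
 */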
4012void
4013xlog_recover_check_summary(
4014 xlog_t *log)
4015{
4016 xfs_mount_t *mp;
4017 xfs_agf_t *agfp;
4018 xfs_buf_t *agfbp;
4019 xfs_buf_t *agibp;
4020 xfs_buf_t *sbbp;
4021#ifdef XFS_LOUD_RECOVERY
4022 xfs_sb_t *sbp;
4023#endif
4024 xfs_agnumber_t agno;
4025 __uint64_t freeblks;
4026 __uint64_t itotal;
4027 __uint64_t ifree;
5e1be0fb 4028 int error;
4029
4030 mp = log->l_mp;
4031
4032 freeblks = 0LL;
4033 itotal = 0LL;
4034 ifree = 0LL;
4035 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4036 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4037 if (error) {
4038 xfs_fs_cmn_err(CE_ALERT, mp,
4039 "xlog_recover_check_summary(agf)"
4040 "agf read failed agno %d error %d",
4041 agno, error);
4042 } else {
4043 agfp = XFS_BUF_TO_AGF(agfbp);
4044 freeblks += be32_to_cpu(agfp->agf_freeblks) +
4045 be32_to_cpu(agfp->agf_flcount);
4046 xfs_buf_relse(agfbp);
1da177e4 4047 }
1da177e4 4048
4049 error = xfs_read_agi(mp, NULL, agno, &agibp);
4050 if (!error) {
4051 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
16259e7d 4052
4053 itotal += be32_to_cpu(agi->agi_count);
4054 ifree += be32_to_cpu(agi->agi_freecount);
4055 xfs_buf_relse(agibp);
4056 }
4057 }
4058
4059 sbbp = xfs_getsb(mp, 0);
4060#ifdef XFS_LOUD_RECOVERY
4061 sbp = &mp->m_sb;
2bdf7cd0 4062 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
4063 cmn_err(CE_NOTE,
4064 "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
4065 sbp->sb_icount, itotal);
4066 cmn_err(CE_NOTE,
4067 "xlog_recover_check_summary: sb_ifree %Lu itotal %Lu",
4068 sbp->sb_ifree, ifree);
4069 cmn_err(CE_NOTE,
4070 "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
4071 sbp->sb_fdblocks, freeblks);
4072#if 0
4073 /*
4074 * This is turned off until I account for the allocation
4075 * btree blocks which live in free space.
4076 */
4077 ASSERT(sbp->sb_icount == itotal);
4078 ASSERT(sbp->sb_ifree == ifree);
4079 ASSERT(sbp->sb_fdblocks == freeblks);
4080#endif
4081#endif
4082 xfs_buf_relse(sbbp);
4083}
4084#endif /* DEBUG */