/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/list_sort.h>

#include "trace_gfs2.h"
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}
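
/*
 * maybe_release_space - make freed resource group blocks allocatable again
 *
 * When a resource group buffer is unpinned, its bitmap clone (used to keep
 * freed blocks from being reused before the log has been flushed) is brought
 * back in sync with the real bitmap data, and rd_free_clone is reset so the
 * freed blocks become available for allocation. If the filesystem is mounted
 * with the discard option, discard requests are issued for those blocks first.
 */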
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == 0)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
}
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction on whose AIL list the buffer should be placed
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
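
/*
 * gfs2_log_incr_head - advance the log flush head by one block
 *
 * Wraps back to block zero when the end of the journal is reached,
 * noting the wrap in sd_log_flush_wrapped.
 */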
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}
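
/*
 * gfs2_log_bmap - map the current log flush head to a device block
 *
 * Walks the journal extent list to translate the journal-relative flush
 * head into an absolute device block number, advancing the flush head as
 * a side effect.
 */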
static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  int error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			set_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}
/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */
static void gfs2_end_log_write(struct bio *bio, int error)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (error) {
		sdp->sd_log_error = error;
		fs_err(sdp, "Error %d writing to log\n", error);
	}

	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, error);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		submit_bio(rw, sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}
/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * able to handle.
 *
 * Returns: Newly allocated bio
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	while (1) {
		bio = bio_alloc(GFP_NOIO, nrvecs);
		if (likely(bio))
			break;
		nrvecs = max(nrvecs/2, 1U);
	}

	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}
/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, WRITE);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
			   unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, WRITE);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}
/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}
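
/*
 * gfs2_get_log_desc - build a log descriptor block in a mempool page
 *
 * Allocates a page from gfs2_page_pool and initialises it as a log
 * descriptor of the given type, recording the length of the descriptor
 * segment and the number of blocks it describes in ld_length and ld_data1.
 */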
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}
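
/*
 * gfs2_check_magic - flag buffers whose data begins with GFS2_MAGIC
 *
 * Journaled data blocks that happen to start with the GFS2 magic number
 * must be "escaped" before being written to the log so that log replay
 * cannot mistake them for metadata. This marks such buffers; the actual
 * escaping happens when the buffer is written out.
 */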
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}
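
/*
 * blocknr_cmp - list_sort() callback ordering bufdata entries by block number
 *
 * Sorting the pinned buffers by in-place disk block number lets the log
 * writes be issued in ascending order.
 */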
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}
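
/*
 * gfs2_before_commit - write pinned (meta)data buffers to the log
 *
 * Common helper for the buf and databuf log operations. The pinned buffers
 * on @blist are sorted by block number and written out in chunks of at most
 * @limit blocks, each chunk preceded by a log descriptor page listing the
 * in-place block number of every buffer in the chunk (plus an escape flag
 * per block when @is_databuf is set).
 */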
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}

			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
			   &sdp->sd_log_le_buf, 0);
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	if (tr == NULL) {
		gfs2_assert(sdp, list_empty(head));
		return;
	}

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}
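
/*
 * Journal replay makes two passes over the log: pass 0 collects revoke
 * tags (see the revoke ops below), and pass 1 replays metadata and
 * journaled data blocks, skipping any block covered by a newer revoke.
 */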
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
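
/*
 * revoke_lo_before_commit - write the queued revokes into the log
 *
 * Revokes are packed as a list of 64-bit block numbers following a revoke
 * log descriptor; when a block fills up, the list continues in further
 * blocks headed by a GFS2_METATYPE_LB meta header.
 */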
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	/* Half the metadata limit: jdata descriptors use two __be64 entries
	   (block number + escape flag) per block. */
	unsigned int limit = buf_limit(sdp) / 2;

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
			   &sdp->sd_log_le_databuf, 1);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}
/* FIXME: sort out accounting for log blocks etc. */
static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	if (tr == NULL) {
		gfs2_assert(sdp, list_empty(head));
		return;
	}

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}
const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};
const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};
const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};
const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};