/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);

	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
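/*
 * Pinning clears the buffer's dirty bit and holds an extra reference on it,
 * so the in-place block can neither be written back nor reclaimed until
 * gfs2_unpin() runs after the log copy has reached disk. sd_log_pinned
 * simply counts how many buffers are currently held this way.
 */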
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}
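/*
 * The bi_clone bitmap shadows the on-disk bitmap so that blocks freed in a
 * not-yet-committed transaction are not handed out again. Copying the just
 * pinned bitmap block over the clone (and resetting rd_free_clone) is what
 * makes those freed blocks available for allocation again.
 */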
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction in which the buffer was pinned
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}
u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}
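/*
 * Journal extents map logical journal blocks to physical (device) blocks.
 * For example, an extent with lblock = 0, dblock = 20000, blocks = 8 maps
 * logical block 5 to device block 20005; the numbers here are made up, but
 * the arithmetic (dblock + lbn - lblock) is exactly what the loop above does.
 */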
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}
/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */
static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
		       bio->bi_status, sdp->sd_jdesc->jd_jid);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}
/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}
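/*
 * The bi_sector assignment above converts a filesystem block number into a
 * 512-byte sector number. With a typical 4096-byte block size, for instance,
 * s_blocksize >> 9 is 8, so block N starts at sector N * 8.
 */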
/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}
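/*
 * bio_end_sector() gives the first sector just past the data already queued
 * in the cached bio; shifting it right by sd_fsb2bb_shift converts sectors
 * back into filesystem blocks. If that equals the requested block number,
 * the new write simply extends the cached bio, so it can be reused.
 */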
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
		       gfs2_log_bmap(sdp));
}
/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
		       gfs2_log_bmap(sdp));
}
/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */
static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: 1 if found, 0 otherwise.
 */
static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host uninitialized_var(lh);
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence > head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}

	kunmap_atomic(kaddr);
	return ret;
}
/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head found so far
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (cleanup == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}
/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache on exit
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int readhead_blocks = BIO_MAX_PAGES << shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		for (; block < je->lblock + je->blocks; block++) {
			u64 dblock;

			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
			}

			if (bio) {
				unsigned int off;

				off = (block << bsize_shift) & ~PAGE_MASK;
				sz = bio_add_page(bio, page, bsize, off);
				if (sz == bsize) { /* block added */
					if (off + bsize == PAGE_SIZE) {
						/* this page is now full */
						page = NULL;
					}
					goto block_added;
				}
				blocks_submitted = block + 1;
				submit_bio(bio);
				bio = NULL;
			}

			dblock = je->dblock + (block - je->lblock);
			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
			sz = bio_add_page(bio, page, bsize, 0);
			gfs2_assert_warn(sdp, sz == bsize);
			if (bsize == PAGE_SIZE)
				page = NULL;

block_added:
			if (blocks_submitted < blocks_read + readhead_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);
	return ret;
}
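/*
 * The readhead_blocks window above keeps roughly BIO_MAX_PAGES pages worth of
 * read I/O outstanding: pages are only taken off the "read" side (and
 * searched for log headers) once enough further reads have been submitted,
 * so the journal scan stays pipelined instead of reading one block at a time.
 */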
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}
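/*
 * "Escaping" deals with journaled data blocks whose first word happens to be
 * GFS2_MAGIC: such a block could be mistaken for metadata during replay, so
 * gfs2_before_commit() zeroes that first word in the copy written to the log
 * and records an escape flag in the descriptor, and databuf_lo_scan_elements()
 * puts the magic number back when the block is replayed.
 */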
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;

				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
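/*
 * Each descriptor page written here covers up to 'limit' buffers: the block
 * numbers (and, for journaled data, an extra escape word per entry) follow
 * the struct gfs2_log_descriptor in the same log block, and the descriptor's
 * length field accounts for the descriptor block plus the 'num' buffer
 * blocks that follow it (hence num + 1 above).
 */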
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;

	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}
/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */
static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
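/*
 * Revokes are packed as a simple array of __be64 block numbers: the first
 * log block starts with a gfs2_log_descriptor, each following block with a
 * gfs2_meta_header (GFS2_METATYPE_LB), and the loop above starts a new block
 * whenever the next 8-byte entry would no longer fit within sb_bsize.
 */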
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd, *tmp;

	/*
	 * Glocks can be referenced repeatedly on the revoke list, but the list
	 * only holds one reference. All glocks on the list will have the
	 * GLF_REVOKES flag set initially.
	 */

	list_for_each_entry_safe(bd, tmp, head, bd_list) {
		struct gfs2_glock *gl = bd->bd_gl;

		if (test_bit(GLF_REVOKES, &gl->gl_flags)) {
			/* Keep each glock on the list exactly once. */
			clear_bit(GLF_REVOKES, &gl->gl_flags);
			continue;
		}
		list_del(&bd->bd_list);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}

	list_for_each_entry_safe(bd, tmp, head, bd_list) {
		struct gfs2_glock *gl = bd->bd_gl;

		list_del(&bd->bd_list);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
	/* the list is empty now */
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;

	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}
/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}
static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};